author    Linus Torvalds <torvalds@linux-foundation.org>  2016-05-17 23:26:30 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-05-17 23:26:30 (GMT)
commit    a7fd20d1c476af4563e66865213474a2f9f473a4 (patch)
tree      fb1399e2f82842450245fb058a8fb23c52865f43 /drivers/net/ethernet/intel
parent    b80fed9595513384424cd141923c9161c4b5021b (diff)
parent    917fa5353da05e8a0045b8acacba8d50400d5b12 (diff)
download  linux-a7fd20d1c476af4563e66865213474a2f9f473a4.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

  1) Support SPI based w5100 devices, from Akinobu Mita.
  2) Partial Segmentation Offload, from Alexander Duyck.
  3) Add GMAC4 support to stmmac driver, from Alexandre TORGUE.
  4) Allow cls_flower stats offload, from Amir Vadai.
  5) Implement bpf blinding, from Daniel Borkmann.
  6) Optimize _ASYNC_ bit twiddling on sockets, unless the socket is
     actually using FASYNC these atomics are superfluous. From Eric Dumazet.
  7) Run TCP more preemptibly, also from Eric Dumazet.
  8) Support LED blinking, EEPROM dumps, and rxvlan offloading in mlx5e
     driver, from Gal Pressman.
  9) Allow creating ppp devices via rtnetlink, from Guillaume Nault.
 10) Improve BPF usage documentation, from Jesper Dangaard Brouer.
 11) Support tunneling offloads in qed, from Manish Chopra.
 12) aRFS offloading in mlx5e, from Maor Gottlieb.
 13) Add RFS and RPS support to SCTP protocol, from Marcelo Ricardo Leitner.
 14) Add MSG_EOR support to TCP, this allows controlling packet coalescing
     on application record boundaries for more accurate socket timestamp
     sampling. From Martin KaFai Lau.
 15) Fix alignment of 64-bit netlink attributes across the board, from
     Nicolas Dichtel.
 16) Per-vlan stats in bridging, from Nikolay Aleksandrov.
 17) Several conversions of drivers to ethtool ksettings, from Philippe
     Reynes.
 18) Checksum neutral ILA in ipv6, from Tom Herbert.
 19) Factorize all of the various marvell dsa drivers into one, from
     Vivien Didelot
 20) Add VF support to qed driver, from Yuval Mintz"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1649 commits)
  Revert "phy dp83867: Fix compilation with CONFIG_OF_MDIO=m"
  Revert "phy dp83867: Make rgmii parameters optional"
  r8169: default to 64-bit DMA on recent PCIe chips
  phy dp83867: Make rgmii parameters optional
  phy dp83867: Fix compilation with CONFIG_OF_MDIO=m
  bpf: arm64: remove callee-save registers use for tmp registers
  asix: Fix offset calculation in asix_rx_fixup() causing slow transmissions
  switchdev: pass pointer to fib_info instead of copy
  net_sched: close another race condition in tcf_mirred_release()
  tipc: fix nametable publication field in nl compat
  drivers: net: Don't print unpopulated net_device name
  qed: add support for dcbx.
  ravb: Add missing free_irq() calls to ravb_close()
  qed: Remove a stray tab
  net: ethernet: fec-mpc52xx: use phy_ethtool_{get|set}_link_ksettings
  net: ethernet: fec-mpc52xx: use phydev from struct net_device
  bpf, doc: fix typo on bpf_asm descriptions
  stmmac: hardware TX COE doesn't work when force_thresh_dma_mode is set
  net: ethernet: fs-enet: use phy_ethtool_{get|set}_link_ksettings
  net: ethernet: fs-enet: use phydev from struct net_device
  ...
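Highlight 14 (MSG_EOR on TCP) gives an application a way to mark record boundaries so the stack does not coalesce the tail of one record with later sends, which makes per-record TX timestamps meaningful. A minimal userspace sketch of the send side — the address, port, and record framing are placeholder assumptions, not part of this merge:

#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in dst = { .sin_family = AF_INET,
                               .sin_port = htons(9000) }; /* placeholder port */

    inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);       /* placeholder host */
    if (fd < 0 || connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
        return 1;

    const char rec[] = "one application record";
    /* MSG_EOR tells TCP not to merge the last segment of this record
     * with whatever is written next, so a TX timestamp taken on the
     * final byte maps cleanly onto this record.
     */
    send(fd, rec, sizeof(rec) - 1, MSG_EOR);
    close(fd);
    return 0;
}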
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 80
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 30
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 111
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 61
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 44
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 163
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 51
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.h | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 352
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 8
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 150
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 50
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 226
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 141
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 21
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ptp.c | 462
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 44
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 28
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 63
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 33
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 37
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 82
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 168
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 36
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 386
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 284
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 86
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 20
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1065
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 114
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 42
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 45
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 509
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 70
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_devids.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 1021
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 114
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 51
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 45
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 49
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 329
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 503
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 145
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 30
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 108
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 10
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 40
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 221
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 42
-rw-r--r--  drivers/net/ethernet/intel/igbvf/defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 196
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 99
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 161
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 50
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1052
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 117
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 302
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 693
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 230
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 37
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 298
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 224
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 6
116 files changed, 6450 insertions, 5222 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3772f3a..714bd10 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -25,16 +25,13 @@ config E100
on the adapter. Look for a label that has a barcode and a number
in the format 123456-001 (six digits hyphen three digits).
- Use the above information and the Adapter & Driver ID Guide at:
+ Use the above information and the Adapter & Driver ID Guide that
+ can be located at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com>
to identify the adapter.
- For the latest Intel PRO/100 network driver for Linux, see:
-
- <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
-
More specific information on configuring the driver is in
<file:Documentation/networking/e100.txt>.
@@ -47,12 +44,7 @@ config E1000
---help---
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -71,12 +63,8 @@ config E1000E
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
use the regular e1000 driver. For more information on how to
- identify your adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ identify your adapter, go to the Adapter & Driver ID Guide that
+ can be located at:
<http://support.intel.com>
@@ -101,12 +89,7 @@ config IGB
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -142,12 +125,7 @@ config IGBVF
---help---
This driver supports Intel(R) 82576 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -164,12 +142,7 @@ config IXGB
This driver supports Intel(R) PRO/10GbE family of adapters for
PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
instead. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -187,12 +160,7 @@ config IXGBE
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -243,12 +211,7 @@ config IXGBEVF
---help---
This driver supports Intel(R) PCI Express virtual functions for the
Intel(R) ixgbe driver. For more information on how to identify your
- adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ adapter, go to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -266,12 +229,7 @@ config I40E
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -326,12 +284,7 @@ config I40EVF
---help---
This driver supports Intel(R) XL710 and X710 virtual functions.
For more information on how to identify your adapter, go to the
- Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -347,12 +300,7 @@ config FM10K
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
- go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ go to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 98fe5a2..d7bdea7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -358,6 +358,8 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 83e557c..975eeb8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1553,7 +1553,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ e1000_close(netdev);
else
e1000_reset(adapter);
@@ -1582,7 +1582,7 @@ static void e1000_diag_test(struct net_device *netdev,
e1000_reset(adapter);
clear_bit(__E1000_TESTING, &adapter->flags);
if (if_running)
- dev_open(netdev);
+ e1000_open(netdev);
} else {
e_info(hw, "online testing starting\n");
/* Online tests */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ae90d4f..f42129d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -114,8 +114,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
-static int e1000_open(struct net_device *netdev);
-static int e1000_close(struct net_device *netdev);
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
@@ -1360,7 +1360,7 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-static int e1000_open(struct net_device *netdev)
+int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1437,7 +1437,7 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int e1000_close(struct net_device *netdev)
+int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 2af603f..cd39137 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -121,7 +121,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
return 0;
}
@@ -845,27 +845,27 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
if (hw->phy.media_type != e1000_media_type_copper)
- reg &= ~(1 << 20);
+ reg &= ~BIT(20);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
+ reg |= BIT(28);
ew32(TARC(1), reg);
/* Disable IPv6 extension header parsing because some malformed
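Many hunks in this pull, starting with the two files above, replace open-coded (1 << n) register masks with the kernel's BIT() helper from <linux/bitops.h>. A standalone sketch of the pattern — the register value is illustrative, and BIT() is redefined locally only so the snippet compiles outside the kernel tree (the definition matches the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Same definition as the kernel helper in include/linux/bitops.h. */
#define BIT(nr) (1UL << (nr))

int main(void)
{
    uint32_t reg = 0;

    /* The open-coded form "1 << 31" shifts a signed int into the
     * sign bit, which is undefined behavior in C; BIT(31) shifts an
     * unsigned long, so the operation is always well defined.
     */
    reg |= BIT(22) | BIT(28);   /* set bits, as in the TARC hunks above */
    reg &= ~BIT(28);            /* clear one again */

    printf("reg = 0x%08x\n", reg);
    return 0;
}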
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 5f70164..7fd4d54 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -185,7 +185,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
break;
}
@@ -1163,12 +1163,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
@@ -1177,11 +1177,11 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+ reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26);
break;
case e1000_82574:
case e1000_82583:
- reg |= (1 << 26);
+ reg |= BIT(26);
break;
default:
break;
@@ -1193,12 +1193,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- reg &= ~((1 << 29) | (1 << 30));
- reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+ reg &= ~(BIT(29) | BIT(30));
+ reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26);
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
+ reg |= BIT(28);
ew32(TARC(1), reg);
break;
default:
@@ -1211,7 +1211,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(CTRL);
- reg &= ~(1 << 29);
+ reg &= ~BIT(29);
ew32(CTRL, reg);
break;
default:
@@ -1224,8 +1224,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(CTRL_EXT);
- reg &= ~(1 << 23);
- reg |= (1 << 22);
+ reg &= ~BIT(23);
+ reg |= BIT(22);
ew32(CTRL_EXT, reg);
break;
default:
@@ -1261,7 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(GCR);
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(GCR, reg);
/* Workaround for hardware errata.
@@ -1308,8 +1308,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
E1000_VFTA_ENTRY_SHIFT) &
E1000_VFTA_ENTRY_MASK;
vfta_bit_in_reg =
- 1 << (hw->mng_cookie.vlan_id &
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ BIT(hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1dc293b..ef96cd1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -109,18 +109,18 @@ struct e1000_info;
#define E1000_TXDCTL_DMA_BURST_ENABLE \
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
E1000_TXDCTL_COUNT_DESC | \
- (1 << 16) | /* wthresh must be +1 more than desired */\
- (1 << 8) | /* hthresh */ \
- 0x1f) /* pthresh */
+ (1u << 16) | /* wthresh must be +1 more than desired */\
+ (1u << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
#define E1000_RXDCTL_DMA_BURST_ENABLE \
(0x01000000 | /* set descriptor granularity */ \
- (4 << 16) | /* set writeback threshold */ \
- (4 << 8) | /* set prefetch threshold */ \
+ (4u << 16) | /* set writeback threshold */ \
+ (4u << 8) | /* set prefetch threshold */ \
0x20) /* set hthresh */
-#define E1000_TIDV_FPD (1 << 31)
-#define E1000_RDTR_FPD (1 << 31)
+#define E1000_TIDV_FPD BIT(31)
+#define E1000_RDTR_FPD BIT(31)
enum e1000_boards {
board_82571,
@@ -347,6 +347,7 @@ struct e1000_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct pm_qos_request pm_qos_req;
+ s32 ptp_delta;
u16 eee_advert;
};
@@ -404,53 +405,53 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
/* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_AMT (1 << 0)
-#define FLAG_HAS_FLASH (1 << 1)
-#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
-#define FLAG_HAS_WOL (1 << 3)
-/* reserved bit4 */
-#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
-#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
-#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
-#define FLAG_READ_ONLY_NVM (1 << 8)
-#define FLAG_IS_ICH (1 << 9)
-#define FLAG_HAS_MSIX (1 << 10)
-#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
-#define FLAG_IS_QUAD_PORT_A (1 << 12)
-#define FLAG_IS_QUAD_PORT (1 << 13)
-#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
-#define FLAG_APME_IN_WUC (1 << 15)
-#define FLAG_APME_IN_CTRL3 (1 << 16)
-#define FLAG_APME_CHECK_PORT_B (1 << 17)
-#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
-#define FLAG_NO_WAKE_UCAST (1 << 19)
-#define FLAG_MNG_PT_ENABLED (1 << 20)
-#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
-#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
-#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
-#define FLAG_RX_NEEDS_RESTART (1 << 24)
-#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
-#define FLAG_SMART_POWER_DOWN (1 << 26)
-#define FLAG_MSI_ENABLED (1 << 27)
-/* reserved (1 << 28) */
-#define FLAG_TSO_FORCE (1 << 29)
-#define FLAG_RESTART_NOW (1 << 30)
-#define FLAG_MSI_TEST_FAILED (1 << 31)
-
-#define FLAG2_CRC_STRIPPING (1 << 0)
-#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
-#define FLAG2_IS_DISCARDING (1 << 2)
-#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
-#define FLAG2_HAS_PHY_STATS (1 << 4)
-#define FLAG2_HAS_EEE (1 << 5)
-#define FLAG2_DMA_BURST (1 << 6)
-#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
-#define FLAG2_DISABLE_AIM (1 << 8)
-#define FLAG2_CHECK_PHY_HANG (1 << 9)
-#define FLAG2_NO_DISABLE_RX (1 << 10)
-#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
-#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
-#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
+#define FLAG_HAS_AMT BIT(0)
+#define FLAG_HAS_FLASH BIT(1)
+#define FLAG_HAS_HW_VLAN_FILTER BIT(2)
+#define FLAG_HAS_WOL BIT(3)
+/* reserved BIT(4) */
+#define FLAG_HAS_CTRLEXT_ON_LOAD BIT(5)
+#define FLAG_HAS_SWSM_ON_LOAD BIT(6)
+#define FLAG_HAS_JUMBO_FRAMES BIT(7)
+#define FLAG_READ_ONLY_NVM BIT(8)
+#define FLAG_IS_ICH BIT(9)
+#define FLAG_HAS_MSIX BIT(10)
+#define FLAG_HAS_SMART_POWER_DOWN BIT(11)
+#define FLAG_IS_QUAD_PORT_A BIT(12)
+#define FLAG_IS_QUAD_PORT BIT(13)
+#define FLAG_HAS_HW_TIMESTAMP BIT(14)
+#define FLAG_APME_IN_WUC BIT(15)
+#define FLAG_APME_IN_CTRL3 BIT(16)
+#define FLAG_APME_CHECK_PORT_B BIT(17)
+#define FLAG_DISABLE_FC_PAUSE_TIME BIT(18)
+#define FLAG_NO_WAKE_UCAST BIT(19)
+#define FLAG_MNG_PT_ENABLED BIT(20)
+#define FLAG_RESET_OVERWRITES_LAA BIT(21)
+#define FLAG_TARC_SPEED_MODE_BIT BIT(22)
+#define FLAG_TARC_SET_BIT_ZERO BIT(23)
+#define FLAG_RX_NEEDS_RESTART BIT(24)
+#define FLAG_LSC_GIG_SPEED_DROP BIT(25)
+#define FLAG_SMART_POWER_DOWN BIT(26)
+#define FLAG_MSI_ENABLED BIT(27)
+/* reserved BIT(28) */
+#define FLAG_TSO_FORCE BIT(29)
+#define FLAG_RESTART_NOW BIT(30)
+#define FLAG_MSI_TEST_FAILED BIT(31)
+
+#define FLAG2_CRC_STRIPPING BIT(0)
+#define FLAG2_HAS_PHY_WAKEUP BIT(1)
+#define FLAG2_IS_DISCARDING BIT(2)
+#define FLAG2_DISABLE_ASPM_L1 BIT(3)
+#define FLAG2_HAS_PHY_STATS BIT(4)
+#define FLAG2_HAS_EEE BIT(5)
+#define FLAG2_DMA_BURST BIT(6)
+#define FLAG2_DISABLE_ASPM_L0S BIT(7)
+#define FLAG2_DISABLE_AIM BIT(8)
+#define FLAG2_CHECK_PHY_HANG BIT(9)
+#define FLAG2_NO_DISABLE_RX BIT(10)
+#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
+#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
+#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -480,6 +481,8 @@ extern const char e1000e_driver_version[];
void e1000e_check_options(struct e1000_adapter *adapter);
void e1000e_set_ethtool_ops(struct net_device *netdev);
+int e1000e_open(struct net_device *netdev);
+int e1000e_close(struct net_device *netdev);
void e1000e_up(struct e1000_adapter *adapter);
void e1000e_down(struct e1000_adapter *adapter, bool reset);
void e1000e_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 6cab1f3..7aff68a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -201,6 +201,9 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
return 0;
}
@@ -236,8 +239,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
mac->forced_speed_duplex = ADVERTISE_100_FULL;
break;
case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = 1;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised =
+ ADVERTISE_1000_FULL;
+ } else {
+ mac->forced_speed_duplex = ADVERTISE_1000_FULL;
+ }
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
@@ -439,8 +447,9 @@ static void e1000_get_regs(struct net_device *netdev,
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
+ regs->version = (1u << 24) |
+ (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -895,7 +904,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- mask |= (1 << 18);
+ mask |= BIT(18);
break;
default:
break;
@@ -914,9 +923,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
/* SHRAH[9] different than the others */
if (i == 10)
- mask |= (1 << 30);
+ mask |= BIT(30);
else
- mask &= ~(1 << 30);
+ mask &= ~BIT(30);
}
if (mac->type == e1000_pch2lan) {
/* SHRAH[0,1,2] different than previous */
@@ -924,7 +933,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
mask &= 0xFFF4FFFF;
/* SHRAH[3] different than SHRAH[0,1,2] */
if (i == 4)
- mask |= (1 << 30);
+ mask |= BIT(30);
/* RAR[1-6] owned by management engine - skipping */
if (i > 0)
i += 6;
@@ -1019,7 +1028,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Test each interrupt */
for (i = 0; i < 10; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (adapter->flags & FLAG_IS_ICH) {
switch (mask) {
@@ -1387,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
case e1000_phy_82579:
/* Disable PHY energy detect power down */
e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
- e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3));
/* Disable full chip energy detect */
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
@@ -1453,7 +1462,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* disable autoneg */
ctrl = er32(TXCW);
- ctrl &= ~(1 << 31);
+ ctrl &= ~BIT(31);
ew32(TXCW, ctrl);
link = (er32(STATUS) & E1000_STATUS_LU);
@@ -1816,7 +1825,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ e1000e_close(netdev);
if (e1000_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1849,7 +1858,7 @@ static void e1000_diag_test(struct net_device *netdev,
clear_bit(__E1000_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ e1000e_open(netdev);
} else {
/* Online tests */
@@ -2283,19 +2292,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_ALL));
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_ALL));
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index c0f4887..3e11322 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1048,7 +1048,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
while (value > PCI_LTR_VALUE_MASK) {
scale++;
- value = DIV_ROUND_UP(value, (1 << 5));
+ value = DIV_ROUND_UP(value, BIT(5));
}
if (scale > E1000_LTRV_SCALE_MAX) {
e_dbg("Invalid LTR latency scale %d\n", scale);
@@ -1573,7 +1573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
- phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+ phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
break;
@@ -2044,9 +2044,9 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
/* Restore SMBus frequency */
if (freq--) {
phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
- phy_data |= (freq & (1 << 0)) <<
+ phy_data |= (freq & BIT(0)) <<
HV_SMB_ADDR_FREQ_LOW_SHIFT;
- phy_data |= (freq & (1 << 1)) <<
+ phy_data |= (freq & BIT(1)) <<
(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
} else {
e_dbg("Unsupported SMB frequency in PHY\n");
@@ -2530,7 +2530,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
/* disable Rx path while enabling/disabling workaround */
e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
if (ret_val)
return ret_val;
@@ -2561,7 +2561,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
/* Enable jumbo frame workaround in the MAC */
mac_reg = er32(FFLT_DBG);
- mac_reg &= ~(1 << 14);
+ mac_reg &= ~BIT(14);
mac_reg |= (7 << 15);
ew32(FFLT_DBG, mac_reg);
@@ -2576,7 +2576,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
- data | (1 << 0));
+ data | BIT(0));
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
@@ -2600,7 +2600,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data &= ~(1 << 13);
+ data &= ~BIT(13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
return ret_val;
@@ -2614,7 +2614,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
- ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
if (ret_val)
return ret_val;
} else {
@@ -2634,7 +2634,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
- data & ~(1 << 0));
+ data & ~BIT(0));
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
@@ -2657,7 +2657,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data |= (1 << 13);
+ data |= BIT(13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
return ret_val;
@@ -2671,13 +2671,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
- ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
if (ret_val)
return ret_val;
}
/* re-enable Rx path after enabling/disabling workaround */
- return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+ return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
}
/**
@@ -4841,7 +4841,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Extended Device Control */
reg = er32(CTRL_EXT);
- reg |= (1 << 22);
+ reg |= BIT(22);
/* Enable PHY low-power state when MAC is at D3 w/o WoL */
if (hw->mac.type >= e1000_pchlan)
reg |= E1000_CTRL_EXT_PHYPDEN;
@@ -4849,34 +4849,34 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
if (hw->mac.type == e1000_ich8lan)
- reg |= (1 << 28) | (1 << 29);
- reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+ reg |= BIT(28) | BIT(29);
+ reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
- reg |= (1 << 24) | (1 << 26) | (1 << 30);
+ reg |= BIT(28);
+ reg |= BIT(24) | BIT(26) | BIT(30);
ew32(TARC(1), reg);
/* Device Status */
if (hw->mac.type == e1000_ich8lan) {
reg = er32(STATUS);
- reg &= ~(1 << 31);
+ reg &= ~BIT(31);
ew32(STATUS, reg);
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2311f60..67163ca 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -73,10 +73,10 @@
(ID_LED_OFF1_ON2 << 4) | \
(ID_LED_DEF1_DEF2))
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
-#define E1000_ICH_NVM_SIG_VALUE 0x80
+#define E1000_ICH_NVM_SIG_WORD 0x13u
+#define E1000_ICH_NVM_SIG_MASK 0xC000u
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0u
+#define E1000_ICH_NVM_SIG_VALUE 0x80u
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e59d7c2..b322011 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -346,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += (ETH_ALEN);
}
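A standalone illustration of the multicast hash placement converted above: the low five bits of the hash select a bit within one 32-bit Multicast Table Array register, and the remaining bits select the register. The register count here is an assumption (the driver reads hw->mac.mta_reg_count from the part):

#include <stdint.h>
#include <stdio.h>

#define MTA_REG_COUNT 128   /* assumed; hw->mac.mta_reg_count in the driver */

static uint32_t mta_shadow[MTA_REG_COUNT];

static void mta_set(uint16_t hash_value)
{
    /* Upper bits pick one of the 32-bit MTA registers. */
    uint16_t hash_reg = (hash_value >> 5) & (MTA_REG_COUNT - 1);
    /* Low 5 bits pick the bit inside it (BIT(hash_bit) in the hunk). */
    uint16_t hash_bit = hash_value & 0x1F;

    mta_shadow[hash_reg] |= (uint32_t)1 << hash_bit;
}

int main(void)
{
    uint16_t hash = 0x0a2c; /* example hash of one multicast address */

    mta_set(hash);
    printf("MTA[%u] = 0x%08x\n",
           (unsigned)((hash >> 5) & (MTA_REG_COUNT - 1)),
           mta_shadow[(hash >> 5) & (MTA_REG_COUNT - 1)]);
    return 0;
}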
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9b4ec13..75e6089 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -242,7 +242,7 @@ static void e1000e_dump(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
+ netdev->state, dev_trans_start(netdev), netdev->last_rx);
}
/* Print Registers */
@@ -317,8 +317,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
else
next_desc = "";
pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
i,
(unsigned long long)le64_to_cpu(u0->a),
(unsigned long long)le64_to_cpu(u0->b),
@@ -2018,7 +2018,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
adapter->eiac_mask |= E1000_IMS_OTHER;
/* Cause Tx interrupts on every write back */
- ivar |= (1 << 31);
+ ivar |= BIT(31);
ew32(IVAR, ivar);
@@ -2709,7 +2709,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev,
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta |= (1 << (vid & 0x1F));
+ vfta |= BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
@@ -2737,7 +2737,7 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta &= ~(1 << (vid & 0x1F));
+ vfta &= ~BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
@@ -2878,7 +2878,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
/* Enable this decision filter in MANC2H */
if (mdef)
- manc2h |= (1 << i);
+ manc2h |= BIT(i);
j |= mdef;
}
@@ -2891,7 +2891,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
if (er32(MDEF(i)) == 0) {
ew32(MDEF(i), (E1000_MDEF_PORT_623 |
E1000_MDEF_PORT_664));
- manc2h |= (1 << 1);
+ manc2h |= BIT(1);
j++;
break;
}
@@ -2971,7 +2971,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* set the speed mode bit, we'll clear it if we're not at
* gigabit link later
*/
-#define SPEED_MODE_BIT (1 << 21)
+#define SPEED_MODE_BIT BIT(21)
tarc |= SPEED_MODE_BIT;
ew32(TARC(0), tarc);
}
@@ -3071,12 +3071,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
phy_data &= 0xfff8;
- phy_data |= (1 << 2);
+ phy_data |= BIT(2);
e1e_wphy(hw, PHY_REG(770, 26), phy_data);
e1e_rphy(hw, 22, &phy_data);
phy_data &= 0x0fff;
- phy_data |= (1 << 14);
+ phy_data |= BIT(14);
e1e_wphy(hw, 0x10, 0x2823);
e1e_wphy(hw, 0x11, 0x0003);
e1e_wphy(hw, 22, phy_data);
@@ -3368,12 +3368,12 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
- int rval;
+ int ret_val;
if (!rar_entries)
break;
- rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
- if (rval < 0)
+ ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (ret_val < 0)
return -ENOMEM;
count++;
}
@@ -3503,8 +3503,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
- if (!(fextnvm7 & (1 << 0))) {
- ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+ if (!(fextnvm7 & BIT(0))) {
+ ew32(FEXTNVM7, fextnvm7 | BIT(0));
e1e_flush();
}
}
@@ -3580,7 +3580,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l4 = false;
bool is_l2 = false;
u32 regval;
- s32 ret_val;
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
@@ -3719,16 +3718,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
er32(RXSTMPH);
er32(TXSTMPH);
- /* Get and set the System Time Register SYSTIM base frequency */
- ret_val = e1000e_get_base_timinca(adapter, &regval);
- if (ret_val)
- return ret_val;
- ew32(TIMINCA, regval);
-
- /* reset the ns time counter */
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
-
return 0;
}
@@ -3839,7 +3828,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
/* update thresholds: prefetch threshold to 31, host threshold to 1
* and make sure the granularity is "descriptors" and not "cache lines"
*/
- rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+ rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
ew32(RXDCTL(0), rxdctl);
/* momentarily enable the RX ring for the changes to take effect */
@@ -3885,6 +3874,53 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
}
/**
+ * e1000e_systim_reset - reset the timesync registers after a hardware reset
+ * @adapter: board private structure
+ *
+ * When the MAC is reset, all hardware bits for timesync will be reset to the
+ * default values. This function will restore the settings last in place.
+ * Since the clock SYSTIME registers are reset, we will simply restore the
+ * cyclecounter to the kernel real clock time.
+ **/
+static void e1000e_systim_reset(struct e1000_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp_clock_info;
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
+ u32 timinca;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ if (info->adjfreq) {
+ /* restore the previous ptp frequency delta */
+ ret_val = info->adjfreq(info, adapter->ptp_delta);
+ } else {
+ /* set the default base frequency if no adjustment possible */
+ ret_val = e1000e_get_base_timinca(adapter, &timinca);
+ if (!ret_val)
+ ew32(TIMINCA, timinca);
+ }
+
+ if (ret_val) {
+ dev_warn(&adapter->pdev->dev,
+ "Failed to restore TIMINCA clock rate delta: %d\n",
+ ret_val);
+ return;
+ }
+
+ /* reset the systim ns time counter */
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ /* restore the previous hwtstamp configuration settings */
+ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+}
+
+/**
* e1000e_reset - bring the hardware into a known good state
*
* This function boots the hardware and enables some settings that
@@ -4063,8 +4099,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
- /* initialize systim and reset the ns time counter */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ /* restore systim and hwtstamp settings */
+ e1000e_systim_reset(adapter);
/* Set EEE advertisement as appropriate */
if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -4275,7 +4311,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
- u32 systimel_1, systimel_2, systimeh;
+ u32 systimel, systimeh;
cycle_t systim, systim_next;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
@@ -4283,24 +4319,25 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
* will experience a huge non linear increment in the systime value
* to fix that we test for overflow and if true, we re-read systime.
*/
- systimel_1 = er32(SYSTIML);
+ systimel = er32(SYSTIML);
systimeh = er32(SYSTIMH);
- systimel_2 = er32(SYSTIML);
- /* Check for overflow. If there was no overflow, use the values */
- if (systimel_1 < systimel_2) {
- systim = (cycle_t)systimel_1;
- systim |= (cycle_t)systimeh << 32;
- } else {
- /* There was an overflow, read again SYSTIMH, and use
- * systimel_2
- */
- systimeh = er32(SYSTIMH);
- systim = (cycle_t)systimel_2;
- systim |= (cycle_t)systimeh << 32;
+ /* Is systimel so large that overflow is possible? */
+ if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
+ u32 systimel_2 = er32(SYSTIML);
+ if (systimel > systimel_2) {
+ /* There was an overflow, read again SYSTIMH, and use
+ * systimel_2
+ */
+ systimeh = er32(SYSTIMH);
+ systimel = systimel_2;
+ }
}
+ systim = (cycle_t)systimel;
+ systim |= (cycle_t)systimeh << 32;
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
- u64 incvalue, time_delta, rem, temp;
+ u64 time_delta, rem, temp;
+ u32 incvalue;
int i;
/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
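The rewrite above narrows the SYSTIML double-read: instead of always reading low-high-low, it re-reads only when the first SYSTIML value is within one maximum increment of wrapping. A generic sketch of that pattern for any split 64-bit counter whose high word does not latch reliably — the accessors below simulate the er32(SYSTIML)/er32(SYSTIMH) reads so the example runs standalone:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_systim;    /* simulated free-running hardware counter */

/* Stand-ins for er32(SYSTIML) / er32(SYSTIMH); each low read ticks
 * the counter forward, modeling time passing between register reads.
 */
static uint32_t read_lo(void) { fake_systim += 24; return (uint32_t)fake_systim; }
static uint32_t read_hi(void) { return (uint32_t)(fake_systim >> 32); }

static uint64_t read_split_counter(uint32_t max_increment)
{
    uint32_t lo = read_lo();
    uint32_t hi = read_hi();

    /* Re-read only when lo could wrap between the two reads,
     * mirroring the E1000_TIMINCA_INCVALUE_MASK check above.
     */
    if (lo >= UINT32_MAX - max_increment) {
        uint32_t lo2 = read_lo();

        if (lo > lo2) { /* wrapped: hi is stale, pair a fresh hi with lo2 */
            hi = read_hi();
            lo = lo2;
        }
    }
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    fake_systim = 0xffffffffull - 30;   /* start just before a low-word wrap */
    printf("systim = 0x%016llx\n",
           (unsigned long long)read_split_counter(24));
    return 0;
}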
@@ -4495,7 +4532,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
}
/**
- * e1000_open - Called when a network interface is made active
+ * e1000e_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
@@ -4506,7 +4543,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int e1000_open(struct net_device *netdev)
+int e1000e_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4604,7 +4641,7 @@ err_setup_tx:
}
/**
- * e1000_close - Disables a network interface
+ * e1000e_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
@@ -4614,7 +4651,7 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int e1000_close(struct net_device *netdev)
+int e1000e_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
@@ -6861,7 +6898,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
le16_to_cpus(&buf);
- if (!ret_val && (!(buf & (1 << 0)))) {
+ if (!ret_val && (!(buf & BIT(0)))) {
/* Deep Smart Power Down (DSPD) */
dev_warn(&adapter->pdev->dev,
"Warning: detected DSPD enabled in EEPROM\n");
@@ -6920,8 +6957,8 @@ static int e1000_set_features(struct net_device *netdev,
}
static const struct net_device_ops e1000e_netdev_ops = {
- .ndo_open = e1000_open,
- .ndo_stop = e1000_close,
+ .ndo_open = e1000e_open,
+ .ndo_stop = e1000e_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_rx_mode = e1000e_set_rx_mode,
@@ -6965,7 +7002,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
- s32 rval = 0;
+ s32 ret_val = 0;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -7200,18 +7237,18 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
(adapter->hw.bus.func == 1))
- rval = e1000_read_nvm(&adapter->hw,
+ ret_val = e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_B,
1, &eeprom_data);
else
- rval = e1000_read_nvm(&adapter->hw,
+ ret_val = e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_A,
1, &eeprom_data);
}
/* fetch WoL from EEPROM */
- if (rval)
- e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+ if (ret_val)
+ e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
else if (eeprom_data & eeprom_apme_mask)
adapter->eeprom_wol |= E1000_WUFC_MAG;
@@ -7231,13 +7268,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
device_wakeup_enable(&pdev->dev);
/* save off EEPROM version number */
- rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+ ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
- if (rval) {
- e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+ if (ret_val) {
+ e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
adapter->eeprom_vers = 0;
}
+ /* init PTP hardware clock */
+ e1000e_ptp_init(adapter);
+
/* reset the hardware with the new settings */
e1000e_reset(adapter);
@@ -7256,9 +7296,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- /* init PTP hardware clock */
- e1000e_ptp_init(adapter);
-
e1000_print_device_info(adapter);
if (pci_dev_run_wake(pdev))
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 49f205c..2efd80d 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -67,7 +67,7 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
u32 eecd = er32(EECD);
u32 mask;
- mask = 0x01 << (count - 1);
+ mask = BIT(count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index de13aea..d78d47b 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2894,11 +2894,11 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if ((hw->phy.type == e1000_phy_82578) &&
(hw->phy.revision >= 1) &&
(hw->phy.addr == 2) &&
- !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
+ !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) {
u16 data2 = 0x7EFF;
ret_val = e1000_access_phy_debug_regs_hv(hw,
- (1 << 6) | 0x3,
+ BIT(6) | 0x3,
&data2, false);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 55bfe47..3027f63 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -104,9 +104,9 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define BM_WUC_DATA_OPCODE 0x12
#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
#define BM_WUC_ENABLE_REG 17
-#define BM_WUC_ENABLE_BIT (1 << 2)
-#define BM_WUC_HOST_WU_BIT (1 << 4)
-#define BM_WUC_ME_WU_BIT (1 << 5)
+#define BM_WUC_ENABLE_BIT BIT(2)
+#define BM_WUC_HOST_WU_BIT BIT(4)
+#define BM_WUC_ME_WU_BIT BIT(5)
#define PHY_UPPER_SHIFT 21
#define BM_PHY_REG(page, reg) \
@@ -124,8 +124,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define I82578_ADDR_REG 29
#define I82577_ADDR_REG 16
#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CFG_ASSERT_CRS_ON_TX BIT(15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift */
#define I82577_CTRL_REG 23
/* 82577 specific PHY registers */
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index e2ff3ef..2e1b17a 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -79,6 +79,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ew32(TIMINCA, timinca);
+ adapter->ptp_delta = delta;
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
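The ptp.c hunk above stores the last frequency delta so e1000e_systim_reset() (added in the netdev.c hunk) can replay it after a MAC reset. That delta originates in userspace via clock_adjtime() on the PHC; a minimal sketch of the caller side — the /dev/ptp0 path is a placeholder assumption:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/timex.h>
#include <unistd.h>

/* Standard mapping from a PHC fd to a dynamic posix clock id,
 * as used by the kernel's testptp tool.
 */
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
    int fd = open("/dev/ptp0", O_RDWR);    /* placeholder PHC node */
    if (fd < 0) {
        perror("open");
        return 1;
    }

    struct timex tx = { .modes = ADJ_FREQUENCY };
    tx.freq = 100 << 16;    /* +100 ppm, in 16-bit fixed point */

    /* This reaches e1000e_phc_adjfreq(), which now also caches the
     * delta in adapter->ptp_delta for replay after a reset.
     */
    if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
        perror("clock_adjtime");

    close(fd);
    return 0;
}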
diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile
index b006ff6..cac6453 100644
--- a/drivers/net/ethernet/intel/fm10k/Makefile
+++ b/drivers/net/ethernet/intel/fm10k/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
-# Intel Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
+# Intel(R) Ethernet Switch Host Interface Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -22,7 +22,7 @@
################################################################################
#
-# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver
+# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
#
obj-$(CONFIG_FM10K) += fm10k.o
@@ -30,7 +30,6 @@ obj-$(CONFIG_FM10K) += fm10k.o
fm10k-y := fm10k_main.o \
fm10k_common.o \
fm10k_pci.o \
- fm10k_ptp.o \
fm10k_netdev.o \
fm10k_ethtool.o \
fm10k_pf.o \
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b34bb00..fcf106e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,9 +27,6 @@
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/ptp_clock_kernel.h>
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -262,12 +259,12 @@ struct fm10k_intfc {
unsigned long state;
u32 flags;
-#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0)
-#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1)
-#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2)
-#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3)
-#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4)
-#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5)
+#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
+#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
+#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
+#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3))
+#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4))
+#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5))
int xcast_mode;
/* Tx fast path data */
@@ -333,6 +330,7 @@ struct fm10k_intfc {
unsigned long last_reset;
unsigned long link_down_event;
bool host_ready;
+ bool lport_map_failed;
u32 reta[FM10K_RETA_SIZE];
u32 rssrk[FM10K_RSSRK_SIZE];
@@ -342,22 +340,8 @@ struct fm10k_intfc {
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_intfc;
-
#endif /* CONFIG_DEBUG_FS */
- struct ptp_clock_info ptp_caps;
- struct ptp_clock *ptp_clock;
-
- struct sk_buff_head ts_tx_skb_queue;
- u32 tx_hwtstamp_timeouts;
- struct hwtstamp_config ts_config;
- /* We are unable to actually adjust the clock beyond the frequency
- * value. Once the clock is started there is no resetting it. As
- * such we maintain a separate offset from the actual hardware clock
- * to allow for offset adjustment.
- */
- s64 ptp_adjust;
- rwlock_t systime_lock;
#ifdef CONFIG_DCB
u8 pfc_en;
#endif
@@ -510,6 +494,8 @@ int fm10k_close(struct net_device *netdev);
/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);
+u32 fm10k_get_reta_size(struct net_device *netdev);
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
/* IOV */
s32 fm10k_iov_event(struct fm10k_intfc *interface);
@@ -544,21 +530,6 @@ static inline void fm10k_dbg_init(void) {}
static inline void fm10k_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
-/* Time Stamping */
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
- struct skb_shared_hwtstamps *hwtstamp,
- u64 systime);
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
- u64 systime);
-void fm10k_ts_reset(struct fm10k_intfc *interface);
-void fm10k_ts_init(struct fm10k_intfc *interface);
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
-void fm10k_ptp_register(struct fm10k_intfc *interface);
-void fm10k_ptp_unregister(struct fm10k_intfc *interface);
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-
/* DCB */
#ifdef CONFIG_DCB
void fm10k_dcbnl_set_ops(struct net_device *dev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 6cfae6a..5bbf19c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 45e4e5b..50f71e9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 2be4361..db4bd8b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 5d6137f..5116fd0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2f6a05b..9c0d875 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -77,19 +77,6 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
FM10K_STAT("tx_hang_count", tx_timeout_count),
-
- FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
-};
-
-static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
- FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
- FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
- FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
- FM10K_STAT("rx_switch_errors", rx_switch_errors),
- FM10K_STAT("rx_drops", rx_drops),
- FM10K_STAT("rx_pp_errors", rx_pp_errors),
- FM10K_STAT("rx_link_errors", rx_link_errors),
- FM10K_STAT("rx_length_errors", rx_length_errors),
};
static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
@@ -121,13 +108,21 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};
+#define FM10K_QUEUE_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
+ .stat_offset = offsetof(struct fm10k_ring, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
+ FM10K_QUEUE_STAT("packets", stats.packets),
+ FM10K_QUEUE_STAT("bytes", stats.bytes),
+};
+
#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
-#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
-
-#define FM10K_QUEUE_STATS_LEN(_n) \
- ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
+#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)
#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
FM10K_NETDEV_STATS_LEN + \
@@ -145,77 +140,56 @@ enum fm10k_self_test_types {
};
enum {
- FM10K_PRV_FLAG_DEBUG_STATS,
FM10K_PRV_FLAG_LEN,
};
static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
- "debug-statistics",
};
+static void fm10k_add_stat_strings(char **p, const char *prefix,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%s%s",
+ prefix, stats[i].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
char *p = (char *)data;
unsigned int i;
- unsigned int j;
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
+ FM10K_NETDEV_STATS_LEN);
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
+ FM10K_GLOBAL_STATS_LEN);
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_debug_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
+ FM10K_MBX_STATS_LEN);
- for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ if (interface->hw.mac.type != fm10k_mac_vf)
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
+ FM10K_PF_STATS_LEN);
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < interface->hw.mac.max_queues; i++) {
+ char prefix[ETH_GSTRING_LEN];
- if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
- for (i = 0; i < iov_data->num_vfs; i++) {
- for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
- snprintf(p,
- ETH_GSTRING_LEN,
- "vf_%u_%s", i,
- fm10k_gstrings_mbx_stats[j].stat_string);
- p += ETH_GSTRING_LEN;
- }
- }
- }
+ snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
+ fm10k_add_stat_strings(&p, prefix,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
- for (i = 0; i < interface->hw.mac.max_queues; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
+ fm10k_add_stat_strings(&p, prefix,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
}
}
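[Editor's note: the string generation above collapses several near-identical loops into one helper that prints an optional prefix into fixed ETH_GSTRING_LEN slots. A compilable user-space sketch of the same pattern; the table contents and queue count are invented for illustration:]

	#include <stdio.h>

	#define ETH_GSTRING_LEN 32

	static const char *const queue_stats[] = { "packets", "bytes" };

	/* Emit each stat name, prefixed, into consecutive fixed-width slots */
	static void add_stat_strings(char **p, const char *prefix,
				     const char *const stats[], unsigned int size)
	{
		unsigned int i;

		for (i = 0; i < size; i++) {
			snprintf(*p, ETH_GSTRING_LEN, "%s%s", prefix, stats[i]);
			*p += ETH_GSTRING_LEN;
		}
	}

	int main(void)
	{
		char data[4 * ETH_GSTRING_LEN] = { 0 };
		char prefix[ETH_GSTRING_LEN];
		char *p = data;
		unsigned int q;

		for (q = 0; q < 2; q++) {
			snprintf(prefix, sizeof(prefix), "tx_queue_%u_", q);
			add_stat_strings(&p, prefix, queue_stats, 2);
		}

		for (q = 0; q < 4; q++)
			printf("%s\n", data + q * ETH_GSTRING_LEN);

		return 0;
	}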
@@ -242,7 +216,6 @@ static void fm10k_get_strings(struct net_device *dev,
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
int stats_len = FM10K_STATIC_STATS_LEN;
@@ -250,19 +223,11 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_TEST:
return FM10K_TEST_LEN;
case ETH_SS_STATS:
- stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues);
+ stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;
if (hw->mac.type != fm10k_mac_vf)
stats_len += FM10K_PF_STATS_LEN;
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- stats_len += FM10K_DEBUG_STATS_LEN;
-
- if (iov_data)
- stats_len += FM10K_MBX_STATS_LEN *
- iov_data->num_vfs;
- }
-
return stats_len;
case ETH_SS_PRIV_FLAGS:
return FM10K_PRV_FLAG_LEN;
@@ -271,93 +236,80 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fm10k_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats __always_unused *stats,
- u64 *data)
+static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
{
- const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
- struct net_device_stats *net_stats = &netdev->stats;
+ unsigned int i;
char *p;
- int i, j;
- fm10k_update_stats(interface);
-
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ if (!pointer) {
+ /* memory is not zero allocated so we have to clear it */
+ for (i = 0; i < size; i++)
+ *((*data)++) = 0;
+ return;
}
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_global_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+ for (i = 0; i < size; i++) {
+ p = (char *)pointer + stats[i].stat_offset;
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_debug_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ switch (stats[i].sizeof_stat) {
+ case sizeof(u64):
+ *((*data)++) = *(u64 *)p;
+ break;
+ case sizeof(u32):
+ *((*data)++) = *(u32 *)p;
+ break;
+ case sizeof(u16):
+ *((*data)++) = *(u16 *)p;
+ break;
+ case sizeof(u8):
+ *((*data)++) = *(u8 *)p;
+ break;
+ default:
+ *((*data)++) = 0;
}
}
+}
- for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
- p = (char *)&interface->hw.mbx +
- fm10k_gstrings_mbx_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+static void fm10k_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct net_device_stats *net_stats = &netdev->stats;
+ int i;
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_pf_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ fm10k_update_stats(interface);
- if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
- for (i = 0; i < iov_data->num_vfs; i++) {
- struct fm10k_vf_info *vf_info;
+ fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
+ FM10K_NETDEV_STATS_LEN);
- vf_info = &iov_data->vf_info[i];
+ fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
+ FM10K_GLOBAL_STATS_LEN);
- /* skip stats if we don't have a vf info */
- if (!vf_info) {
- data += FM10K_MBX_STATS_LEN;
- continue;
- }
+ fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
+ fm10k_gstrings_mbx_stats,
+ FM10K_MBX_STATS_LEN);
- for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
- p = (char *)&vf_info->mbx +
- fm10k_gstrings_mbx_stats[j].stat_offset;
- *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ if (interface->hw.mac.type != fm10k_mac_vf) {
+ fm10k_add_ethtool_stats(&data, interface,
+ fm10k_gstrings_pf_stats,
+ FM10K_PF_STATS_LEN);
}
for (i = 0; i < interface->hw.mac.max_queues; i++) {
struct fm10k_ring *ring;
- u64 *queue_stat;
ring = interface->tx_ring[i];
- if (ring)
- queue_stat = (u64 *)&ring->stats;
- for (j = 0; j < stat_count; j++)
- *(data++) = ring ? queue_stat[j] : 0;
+ fm10k_add_ethtool_stats(&data, ring,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
ring = interface->rx_ring[i];
- if (ring)
- queue_stat = (u64 *)&ring->stats;
- for (j = 0; j < stat_count; j++)
- *(data++) = ring ? queue_stat[j] : 0;
+ fm10k_add_ethtool_stats(&data, ring,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
}
}
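[Editor's note: fm10k_add_ethtool_stats() widens each counter to u64 by switching on the size recorded in the stat table, and zero-fills when the backing structure (for example an unallocated ring) is NULL. A hedged user-space sketch of the offsetof()-driven table plus the size dispatch; the struct and field names are invented:]

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ring_stats {
		uint64_t packets;
		uint32_t restarts;
	};

	struct stat_desc {
		const char *name;
		size_t size;	/* mirrors FIELD_SIZEOF() */
		size_t offset;	/* mirrors offsetof() */
	};

	#define STAT(_name, _field) {				\
		_name,						\
		sizeof(((struct ring_stats *)0)->_field),	\
		offsetof(struct ring_stats, _field)		\
	}

	static const struct stat_desc descs[] = {
		STAT("packets", packets),
		STAT("restarts", restarts),
	};

	/* Copy one structure's counters into the flat u64 array,
	 * widening by recorded size and zero-filling if absent.
	 */
	static void add_stats(uint64_t **data, const void *base)
	{
		size_t i, n = sizeof(descs) / sizeof(descs[0]);

		for (i = 0; i < n; i++) {
			const char *p;

			if (!base) {
				*((*data)++) = 0;
				continue;
			}

			p = (const char *)base + descs[i].offset;
			switch (descs[i].size) {
			case sizeof(uint64_t):
				*((*data)++) = *(const uint64_t *)p;
				break;
			case sizeof(uint32_t):
				*((*data)++) = *(const uint32_t *)p;
				break;
			default:
				*((*data)++) = 0;
				break;
			}
		}
	}

	int main(void)
	{
		struct ring_stats rs = { .packets = 1234, .restarts = 5 };
		uint64_t out[2], *data = out;

		add_stats(&data, &rs);
		printf("%llu %llu\n", (unsigned long long)out[0],
		       (unsigned long long)out[1]);
		return 0;
	}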
@@ -425,7 +377,7 @@ static void fm10k_get_regs(struct net_device *netdev,
u32 *buff = p;
u16 i;
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;
switch (hw->mac.type) {
case fm10k_mac_pf:
@@ -935,15 +887,15 @@ static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 attr_flag, test_msg[6];
unsigned long timeout;
- int err;
+ int err = -EINVAL;
/* For now this is a VF only feature */
if (hw->mac.type != fm10k_mac_vf)
return 0;
/* loop through both nested and unnested attribute types */
- for (attr_flag = (1 << FM10K_TEST_MSG_UNSET);
- attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED));
+ for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
+ attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
attr_flag += attr_flag) {
/* generate message to be tested */
fm10k_tlv_msg_test_create(test_msg, attr_flag);
@@ -1001,35 +953,56 @@ static void fm10k_self_test(struct net_device *dev,
static u32 fm10k_get_priv_flags(struct net_device *netdev)
{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- u32 priv_flags = 0;
-
- if (interface->flags & FM10K_FLAG_DEBUG_STATS)
- priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
-
- return priv_flags;
+ return 0;
}
static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct fm10k_intfc *interface = netdev_priv(netdev);
-
- if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN))
+ if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
return -EINVAL;
- if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS))
- interface->flags |= FM10K_FLAG_DEBUG_STATS;
- else
- interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
-
return 0;
}
-static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
+u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
+{
+ u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
+ struct fm10k_hw *hw = &interface->hw;
+ u32 table[4];
+ int i, j;
+
+ /* record entries to reta table */
+ for (i = 0; i < FM10K_RETA_SIZE; i++) {
+ u32 reta, n;
+
+ /* generate a new table if we weren't given one */
+ for (j = 0; j < 4; j++) {
+ if (indir)
+				n = indir[4 * i + j];
+			else
+				n = ethtool_rxfh_indir_default(4 * i + j, rss_i);
+
+ table[j] = n;
+ }
+
+ reta = table[0] |
+ (table[1] << 8) |
+ (table[2] << 16) |
+ (table[3] << 24);
+
+ if (interface->reta[i] == reta)
+ continue;
+
+ interface->reta[i] = reta;
+ fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
+ }
+}
+
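[Editor's note: each 32-bit RETA register packs four 8-bit queue indices, least-significant entry first, which is what the shift-and-OR above builds; four table entries per register is also why the indirection index advances by 4 * i + j. A small sketch of the pack and its inverse; helper names invented, u8/u32 as in <linux/types.h>:]

	static inline u32 example_reta_pack(const u8 q[4])
	{
		return q[0] | (q[1] << 8) | (q[2] << 16) | ((u32)q[3] << 24);
	}

	static inline u8 example_reta_unpack(u32 reta, unsigned int slot)
	{
		return (reta >> (slot * 8)) & 0xff;	/* slot 0..3 */
	}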
static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -1053,7 +1026,6 @@ static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_hw *hw = &interface->hw;
int i;
u16 rss_i;
@@ -1068,19 +1040,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
return -EINVAL;
}
- /* record entries to reta table */
- for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
- u32 reta = indir[0] |
- (indir[1] << 8) |
- (indir[2] << 16) |
- (indir[3] << 24);
-
- if (interface->reta[i] == reta)
- continue;
-
- interface->reta[i] = reta;
- fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
- }
+ fm10k_write_reta(interface, indir);
return 0;
}
@@ -1145,7 +1105,7 @@ static unsigned int fm10k_max_channels(struct net_device *dev)
/* For QoS report channels per traffic class */
if (tcs > 1)
- max_combined = 1 << (fls(max_combined / tcs) - 1);
+		max_combined = BIT(fls(max_combined / tcs) - 1);
return max_combined;
}
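[Editor's note: the BIT()/fls() pairing above is the kernel's stock power-of-two rounding: fls() returns the 1-based index of the highest set bit, so BIT(fls(x) - 1) rounds a positive x down to a power of two, while BIT(fls(x - 1)) - 1, used for the ring masks elsewhere in this series, yields an all-ones index mask. A hedged sketch assuming the kernel helpers from <linux/bits.h> and <linux/bitops.h>:]

	/* E.g. x = 6: fls(6) = 3, so the round-down is BIT(2) = 4,
	 * and the index mask is BIT(3) - 1 = 7 (eight slots).
	 */
	static inline unsigned int round_down_pow2(unsigned int x)
	{
		return BIT(fls(x) - 1);		/* requires x > 0 */
	}

	static inline unsigned int pow2_index_mask(unsigned int x)
	{
		return BIT(fls(x - 1)) - 1;	/* requires x >= 2 */
	}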
@@ -1192,33 +1152,6 @@ static int fm10k_set_channels(struct net_device *dev,
return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}
-static int fm10k_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
-
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (interface->ptp_clock)
- info->phc_index = ptp_clock_index(interface->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
-
- return 0;
-}
-
static const struct ethtool_ops fm10k_ethtool_ops = {
.get_strings = fm10k_get_strings,
.get_sset_count = fm10k_get_sset_count,
@@ -1246,7 +1179,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_rxfh = fm10k_set_rssh,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
- .get_ts_info = fm10k_get_ts_info,
};
void fm10k_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index acfb8b1f..47f0743 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -50,7 +50,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
s64 vflre;
int i;
- /* if there is no iov_data then there is no mailboxes to process */
+ /* if there is no iov_data then there is no mailbox to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
@@ -98,7 +98,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
struct fm10k_iov_data *iov_data;
int i;
- /* if there is no iov_data then there is no mailboxes to process */
+ /* if there is no iov_data then there is no mailbox to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 4de17db..0e166e9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,15 +29,15 @@
#include "fm10k.h"
#define DRV_VERSION "0.19.3-k"
+#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
-static const char fm10k_driver_string[] =
- "Intel(R) Ethernet Switch Host Interface Driver";
+static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright (c) 2013 Intel Corporation.";
+ "Copyright (c) 2013 - 2016 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
+MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
@@ -401,10 +401,10 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
}
#define FM10K_RSS_L4_TYPES_MASK \
- ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
- (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
- (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
- (1ul << FM10K_RSSTYPE_IPV6_UDP))
+ (BIT(FM10K_RSSTYPE_IPV4_TCP) | \
+ BIT(FM10K_RSSTYPE_IPV4_UDP) | \
+ BIT(FM10K_RSSTYPE_IPV6_TCP) | \
+ BIT(FM10K_RSSTYPE_IPV6_UDP))
static inline void fm10k_rx_hash(struct fm10k_ring *ring,
union fm10k_rx_desc *rx_desc,
@@ -420,23 +420,10 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring,
return;
skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
- (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
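[Editor's note: rewriting the hash-type test as BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK turns the mask into a set-membership check. A sketch of the idiom, assuming rss_type < 32, which holds for the small RSSTYPE field decoded above:]

	static inline bool example_rss_type_is_l4(u8 rss_type)
	{
		/* nonzero iff rss_type is one of the four L4 types in the mask */
		return !!(BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK);
	}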
-static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
- union fm10k_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct fm10k_intfc *interface = rx_ring->q_vector->interface;
-
- FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
-
- if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
- fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
- le64_to_cpu(rx_desc->q.timestamp));
-}
-
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
union fm10k_rx_desc __maybe_unused *rx_desc,
struct sk_buff *skb)
@@ -486,8 +473,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
fm10k_rx_checksum(rx_ring, rx_desc, skb);
- fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);
-
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -835,6 +820,8 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
struct ipv6hdr *ipv6;
u8 *raw;
} network_hdr;
+ u8 *transport_hdr;
+ __be16 frag_off;
__be16 protocol;
u8 l4_hdr = 0;
@@ -852,9 +839,11 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
goto no_csum;
}
network_hdr.raw = skb_inner_network_header(skb);
+ transport_hdr = skb_inner_transport_header(skb);
} else {
protocol = vlan_get_protocol(skb);
network_hdr.raw = skb_network_header(skb);
+ transport_hdr = skb_transport_header(skb);
}
switch (protocol) {
@@ -863,15 +852,17 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
break;
case htons(ETH_P_IPV6):
l4_hdr = network_hdr.ipv6->nexthdr;
+ if (likely((transport_hdr - network_hdr.raw) ==
+ sizeof(struct ipv6hdr)))
+ break;
+ ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
+ sizeof(struct ipv6hdr),
+ &l4_hdr, &frag_off);
+ if (unlikely(frag_off))
+ l4_hdr = NEXTHDR_FRAGMENT;
break;
default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but ip version=%x!\n",
- protocol);
- }
- tx_ring->tx_stats.csum_err++;
- goto no_csum;
+ break;
}
switch (l4_hdr) {
@@ -884,9 +875,10 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum, version=%d l4 proto=%x\n",
+ protocol, l4_hdr);
}
+ skb_checksum_help(skb);
tx_ring->tx_stats.csum_err++;
goto no_csum;
}
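[Editor's note: the IPv6 branch now only trusts the cheap path when the transport header immediately follows the fixed 40-byte IPv6 header; otherwise ipv6_skip_exthdr() walks the extension-header chain to recover the true L4 protocol, and a nonzero fragment offset forces NEXTHDR_FRAGMENT so the frame falls through to skb_checksum_help(). A hedged sketch of that resolution step; the function name is invented, the helpers are from <net/ipv6.h>:]

	static int example_ipv6_l4proto(struct sk_buff *skb, u8 *l4_hdr)
	{
		__be16 frag_off;
		int offset;

		*l4_hdr = ipv6_hdr(skb)->nexthdr;
		offset = ipv6_skip_exthdr(skb,
					  skb_network_offset(skb) +
					  sizeof(struct ipv6hdr),
					  l4_hdr, &frag_off);
		if (offset < 0)
			return offset;		/* malformed extension chain */
		if (frag_off)
			*l4_hdr = NEXTHDR_FRAGMENT;
		return offset;			/* transport header offset */
	}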
@@ -912,11 +904,6 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
/* set type for advanced descriptor with frame checksum insertion */
u32 desc_flags = 0;
- /* set timestamping bits */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
- desc_flags |= FM10K_TXD_FLAG_TIME;
-
/* set checksum offload bits */
desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
FM10K_TXD_FLAG_CSUM);
@@ -1198,9 +1185,10 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
* fm10k_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
- struct fm10k_ring *tx_ring)
+ struct fm10k_ring *tx_ring, int napi_budget)
{
struct fm10k_intfc *interface = q_vector->interface;
struct fm10k_tx_buffer *tx_buffer;
@@ -1238,7 +1226,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1409,7 +1397,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
* accounts for changes in the ITR due to PCIe link speed.
*/
itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
- avg_wire_size += (1 << itr_round) - 1;
+ avg_wire_size += BIT(itr_round) - 1;
avg_wire_size >>= itr_round;
/* write back value and retain adaptive flag */
@@ -1449,8 +1437,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
int per_ring_budget, work_done = 0;
bool clean_complete = true;
- fm10k_for_each_ring(ring, q_vector->tx)
- clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+ fm10k_for_each_ring(ring, q_vector->tx) {
+ if (!fm10k_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
/* Handle case where we are called by netpoll with a budget of 0 */
if (budget <= 0)
@@ -1468,7 +1458,8 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
work_done += work;
- clean_complete &= !!(work < per_ring_budget);
+ if (work >= per_ring_budget)
+ clean_complete = false;
}
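[Editor's note: threading the NAPI budget into the Tx cleanup lets napi_consume_skb() distinguish real polling from netpoll: with a budget of 0 it degrades to the any-context dev_consume_skb_any() path, otherwise it may batch-recycle skbs cheaply. A minimal sketch of the calling convention; the struct and names are invented:]

	struct example_ring {
		struct sk_buff *done_skb;	/* skb of a completed descriptor */
	};

	static bool example_clean_tx(struct example_ring *ring, int napi_budget)
	{
		/* budget == 0 => called from netpoll; napi_consume_skb()
		 * then falls back to the any-context free path.
		 */
		napi_consume_skb(ring->done_skb, napi_budget);
		ring->done_skb = NULL;
		return true;
	}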
/* If all work not completed, return budget and keep polling */
@@ -1511,17 +1502,17 @@ static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
/* set QoS mask and indices */
f = &interface->ring_feature[RING_F_QOS];
f->indices = pcs;
- f->mask = (1 << fls(pcs - 1)) - 1;
+ f->mask = BIT(fls(pcs - 1)) - 1;
/* determine the upper limit for our current DCB mode */
rss_i = interface->hw.mac.max_queues / pcs;
- rss_i = 1 << (fls(rss_i) - 1);
+ rss_i = BIT(fls(rss_i) - 1);
/* set RSS mask and indices */
f = &interface->ring_feature[RING_F_RSS];
rss_i = min_t(u16, rss_i, f->limit);
f->indices = rss_i;
- f->mask = (1 << fls(rss_i - 1)) - 1;
+ f->mask = BIT(fls(rss_i - 1)) - 1;
/* configure pause class to queue mapping */
for (i = 0; i < pcs; i++)
@@ -1551,7 +1542,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
/* record indices and power of 2 mask for RSS */
f->indices = rss_i;
- f->mask = (1 << fls(rss_i - 1)) - 1;
+ f->mask = BIT(fls(rss_i - 1)) - 1;
interface->num_rx_queues = rss_i;
interface->num_tx_queues = rss_i;
@@ -1572,17 +1563,29 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
**/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
- /* Start with base case */
- interface->num_rx_queues = 1;
- interface->num_tx_queues = 1;
-
+ /* Attempt to setup QoS and RSS first */
if (fm10k_set_qos_queues(interface))
return;
+ /* If we don't have QoS, just fallback to only RSS. */
fm10k_set_rss_queues(interface);
}
/**
+ * fm10k_reset_num_queues - Reset the number of queues to zero
+ * @interface: board private structure
+ *
+ * This function should be called whenever we need to reset the number of
+ * queues after an error condition.
+ */
+static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
+{
+ interface->num_tx_queues = 0;
+ interface->num_rx_queues = 0;
+ interface->num_q_vectors = 0;
+}
+
+/**
* fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
* @interface: board private structure to initialize
* @v_count: q_vectors allocated on interface, used for ring interleaving
@@ -1765,9 +1768,7 @@ static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
return 0;
err_out:
- interface->num_tx_queues = 0;
- interface->num_rx_queues = 0;
- interface->num_q_vectors = 0;
+ fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
@@ -1787,9 +1788,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
int v_idx = interface->num_q_vectors;
- interface->num_tx_queues = 0;
- interface->num_rx_queues = 0;
- interface->num_q_vectors = 0;
+ fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
@@ -1935,7 +1934,7 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface)
static void fm10k_init_reta(struct fm10k_intfc *interface)
{
u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
- u32 reta, base;
+ u32 reta;
/* If the Rx flow indirection table has been configured manually, we
* need to maintain it when possible.
@@ -1960,21 +1959,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
}
repopulate_reta:
- /* Populate the redirection table 4 entries at a time. To do this
- * we are generating the results for n and n+2 and then interleaving
- * those with the results with n+1 and n+3.
- */
- for (i = FM10K_RETA_SIZE; i--;) {
- /* first pass generates n and n+2 */
- base = ((i * 0x00040004) + 0x00020000) * rss_i;
- reta = (base & 0x3F803F80) >> 7;
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * rss_i;
- reta |= (base & 0x3F803F80) << 1;
-
- interface->reta[i] = reta;
- }
+ fm10k_write_reta(interface, NULL);
}
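[Editor's note: the deleted arithmetic built the default spread by interleaving register halves; the replacement defers to the stock helper in <linux/ethtool.h>, which as I read it is a plain round-robin over the active Rx rings:]

	static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
	{
		return index % n_rx_rings;	/* entry i -> ring i mod rss_i */
	}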
/**
@@ -1997,14 +1982,15 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
if (err) {
dev_err(&interface->pdev->dev,
"Unable to initialize MSI-X capability\n");
- return err;
+ goto err_init_msix;
}
/* Allocate memory for queues */
err = fm10k_alloc_q_vectors(interface);
if (err) {
- fm10k_reset_msix_capability(interface);
- return err;
+ dev_err(&interface->pdev->dev,
+ "Unable to allocate queue vectors\n");
+ goto err_alloc_q_vectors;
}
/* Map rings to devices, and map devices to physical queues */
@@ -2014,6 +2000,12 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
fm10k_init_reta(interface);
return 0;
+
+err_alloc_q_vectors:
+ fm10k_reset_msix_capability(interface);
+err_init_msix:
+ fm10k_reset_num_queues(interface);
+ return err;
}
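[Editor's note: the reworked error path follows the kernel's usual reverse-order unwind: each failure jumps to a label that undoes exactly what succeeded before it, so the queue counts are reset on every exit. The shape, reduced to a stub sketch with all names invented:]

	int example_step_a(void);
	int example_step_b(void);
	void example_undo_a(void);
	void example_reset_state(void);

	int example_init(void)
	{
		int err;

		err = example_step_a();
		if (err)
			goto err_a;

		err = example_step_b();
		if (err)
			goto err_b;

		return 0;

	err_b:
		example_undo_a();	/* undo in reverse order */
	err_a:
		example_reset_state();
		return err;
	}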
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 98202c3..c9dfa65 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index 245a0a3..b7dbc8a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index d09a8dd..2a08d3f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -243,9 +243,6 @@ void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
for (i = 0; i < interface->num_tx_queues; i++)
fm10k_clean_tx_ring(interface->tx_ring[i]);
-
- /* remove any stale timestamp buffers and free them */
- skb_queue_purge(&interface->ts_tx_skb_queue);
}
/**
@@ -440,7 +437,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
* @sa_family: Address family of new port
* @port: port number used for VXLAN
*
- * This funciton is called when a new VXLAN interface has added a new port
+ * This function is called when a new VXLAN interface has added a new port
* number to the range that is currently in use for VXLAN. The new port
* number is always added to the tail so that the port number list should
* match the order in which the ports were allocated. The head of the list
@@ -484,7 +481,7 @@ insert_tail:
* @sa_family: Address family of freed port
* @port: port number used for VXLAN
*
- * This funciton is called when a new VXLAN interface has freed a port
+ * This function is called when a new VXLAN interface has freed a port
* number from the range that is currently in use for VXLAN. The freed
* port is removed from the list and the new head is used to determine
* the port number for offloads.
@@ -660,10 +657,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
__skb_put(skb, pad_len);
}
- /* prepare packet for hardware time stamping */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- fm10k_ts_tx_enqueue(interface, skb);
-
if (r_idx >= interface->num_tx_queues)
r_idx %= interface->num_tx_queues;
@@ -884,7 +877,7 @@ static int __fm10k_uc_sync(struct net_device *dev,
return -EADDRNOTAVAIL;
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = hw->mac.ops.update_uc_addr(hw, glort, addr,
@@ -947,7 +940,7 @@ static int __fm10k_mc_sync(struct net_device *dev,
u16 vid, glort = interface->glort;
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
@@ -1002,11 +995,8 @@ static void fm10k_set_rx_mode(struct net_device *dev)
}
/* synchronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
- }
+ __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
+ __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
fm10k_mbx_unlock(interface);
}
@@ -1044,7 +1034,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
hw->mac.ops.update_vlan(hw, 0, 0, true);
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_vlan(hw, vid, 0, true);
@@ -1056,11 +1046,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
- }
+ __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
+ __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
fm10k_mbx_unlock(interface);
@@ -1213,18 +1200,6 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return fm10k_setup_tc(dev, tc->tc);
}
-static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return fm10k_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return fm10k_set_ts_config(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
struct fm10k_l2_accel *l2_accel)
{
@@ -1402,7 +1377,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
.ndo_add_vxlan_port = fm10k_add_vxlan_port,
.ndo_del_vxlan_port = fm10k_del_vxlan_port,
- .ndo_do_ioctl = fm10k_ioctl,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1429,7 +1403,7 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
/* configure default debug level */
interface = netdev_priv(dev);
- interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
/* configure default features */
dev->features |= NETIF_F_IP_CSUM |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 4eb7a6f..e05aca9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -99,7 +99,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface)
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
- BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
+ WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
	/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
@@ -145,7 +145,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
usleep_range(1000, 2000);
@@ -209,9 +209,6 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
- /* reset clock */
- fm10k_ts_reset(interface);
-
err = netif_running(netdev) ? fm10k_open(netdev) : 0;
if (err)
goto err_open;
@@ -559,7 +556,6 @@ static void fm10k_service_task(struct work_struct *work)
/* tasks only run when interface is up */
fm10k_watchdog_subtask(interface);
fm10k_check_hang_subtask(interface);
- fm10k_ts_tx_subtask(interface);
/* release lock on service events to allow scheduling next event */
fm10k_service_event_complete(interface);
@@ -579,7 +575,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
u64 tdba = ring->dma;
u32 size = ring->count * sizeof(struct fm10k_tx_desc);
u32 txint = FM10K_INT_MAP_DISABLE;
- u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
+ u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
@@ -730,7 +726,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
if (interface->pfc_en)
rx_pause = interface->pfc_en;
#endif
- if (!(rx_pause & (1 << ring->qos_pc)))
+ if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -779,7 +775,7 @@ void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
u8 reg_idx = ring->reg_idx;
- if (!(rx_pause & (1 << ring->qos_pc)))
+ if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -903,8 +899,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
/* re-enable mailbox interrupt and indicate 20us delay */
fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
- FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
- hw->mac.itr_scale));
+ (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+ FM10K_ITR_ENABLE);
/* service upstream mailbox */
if (fm10k_mbx_trylock(interface)) {
@@ -1065,7 +1061,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
if (maxholdq)
fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
for (q = 255;;) {
- if (maxholdq & (1 << 31)) {
+ if (maxholdq & BIT(31)) {
if (q < FM10K_MAX_QUEUES_PF) {
interface->rx_overrun_pf++;
fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
@@ -1135,22 +1131,24 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* re-enable mailbox interrupt and indicate 20us delay */
fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
- FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
- hw->mac.itr_scale));
+ (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+ FM10K_ITR_ENABLE);
return IRQ_HANDLED;
}
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
- struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
struct fm10k_hw *hw = &interface->hw;
+ struct msix_entry *entry;
int itr_reg;
/* no mailbox IRQ to free if MSI-X is not enabled */
if (!interface->msix_entries)
return;
+ entry = &interface->msix_entries[FM10K_MBX_VECTOR];
+
/* disconnect the mailbox */
hw->mbx.ops.disconnect(hw, &hw->mbx);
@@ -1202,25 +1200,6 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
return 0;
}
-static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info __always_unused *mbx)
-{
- struct fm10k_intfc *interface;
- u64 timestamp;
- s32 err;
-
- err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP],
- &timestamp);
- if (err)
- return err;
-
- interface = container_of(hw, struct fm10k_intfc, hw);
-
- fm10k_ts_tx_hwtstamp(interface, 0, timestamp);
-
- return 0;
-}
-
/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info __always_unused *mbx)
@@ -1241,7 +1220,6 @@ static const struct fm10k_msg_data vf_mbx_data[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
- FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
@@ -1253,7 +1231,7 @@ static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
int err;
/* Use timer0 for interrupt moderation on the mailbox */
- u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry;
+ u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;
/* register mailbox handlers */
err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
@@ -1285,11 +1263,40 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
u32 dglort_map = hw->mac.dglort_map;
s32 err;
+ interface = container_of(hw, struct fm10k_intfc, hw);
+
+ err = fm10k_msg_err_pf(hw, results, mbx);
+ if (!err && hw->swapi.status) {
+ /* force link down for a reasonable delay */
+ interface->link_down_event = jiffies + (2 * HZ);
+ set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+ /* reset dglort_map back to no config */
+ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+ fm10k_service_event_schedule(interface);
+
+ /* prevent overloading kernel message buffer */
+ if (interface->lport_map_failed)
+ return 0;
+
+ interface->lport_map_failed = true;
+
+ if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
+ dev_warn(&interface->pdev->dev,
+ "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
+ dev_warn(&interface->pdev->dev,
+ "request logical port map failed: %d\n",
+ hw->swapi.status);
+
+ return 0;
+ }
+
err = fm10k_msg_lport_map_pf(hw, results, mbx);
if (err)
return err;
- interface = container_of(hw, struct fm10k_intfc, hw);
+ interface->lport_map_failed = false;
/* we need to reset if port count was just updated */
if (dglort_map != hw->mac.dglort_map)
@@ -1339,68 +1346,6 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
return 0;
}
-static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info __always_unused *mbx)
-{
- struct fm10k_swapi_1588_timestamp timestamp;
- struct fm10k_iov_data *iov_data;
- struct fm10k_intfc *interface;
- u16 sglort, vf_idx;
- s32 err;
-
- err = fm10k_tlv_attr_get_le_struct(
- results[FM10K_PF_ATTR_ID_1588_TIMESTAMP],
- &timestamp, sizeof(timestamp));
- if (err)
- return err;
-
- interface = container_of(hw, struct fm10k_intfc, hw);
-
- if (timestamp.dglort) {
- fm10k_ts_tx_hwtstamp(interface, timestamp.dglort,
- le64_to_cpu(timestamp.egress));
- return 0;
- }
-
- /* either dglort or sglort must be set */
- if (!timestamp.sglort)
- return FM10K_ERR_PARAM;
-
- /* verify GLORT is at least one of the ones we own */
- sglort = le16_to_cpu(timestamp.sglort);
- if (!fm10k_glort_valid_pf(hw, sglort))
- return FM10K_ERR_PARAM;
-
- if (sglort == interface->glort) {
- fm10k_ts_tx_hwtstamp(interface, 0,
- le64_to_cpu(timestamp.ingress));
- return 0;
- }
-
- /* if there is no iov_data then there is no mailboxes to process */
- if (!ACCESS_ONCE(interface->iov_data))
- return FM10K_ERR_PARAM;
-
- rcu_read_lock();
-
- /* notify VF if this timestamp belongs to it */
- iov_data = interface->iov_data;
- vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort;
-
- if (!iov_data || vf_idx >= iov_data->num_vfs) {
- err = FM10K_ERR_PARAM;
- goto err_unlock;
- }
-
- err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx],
- le64_to_cpu(timestamp.ingress));
-
-err_unlock:
- rcu_read_unlock();
-
- return err;
-}
-
static const struct fm10k_msg_data pf_mbx_data[] = {
FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1408,7 +1353,6 @@ static const struct fm10k_msg_data pf_mbx_data[] = {
FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
- FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
@@ -1420,8 +1364,8 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
int err;
/* Use timer0 for interrupt moderation on the mailbox */
- u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
- u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;
+ u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
+ u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;
/* register mailbox handlers */
err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
@@ -1654,6 +1598,7 @@ void fm10k_down(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
+ int err;
/* signal that we are down to the interrupt handler and service task */
set_bit(__FM10K_DOWN, &interface->state);
@@ -1678,7 +1623,9 @@ void fm10k_down(struct fm10k_intfc *interface)
fm10k_update_stats(interface);
/* Disable DMA engine for Tx/Rx */
- hw->mac.ops.stop_hw(hw);
+ err = hw->mac.ops.stop_hw(hw);
+ if (err)
+ dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
/* free any buffers still on the rings */
fm10k_clean_all_tx_rings(interface);
@@ -1776,35 +1723,17 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ ether_addr_copy(netdev->perm_addr, hw->mac.addr);
if (!is_valid_ether_addr(netdev->perm_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
return -EIO;
}
- /* assign BAR 4 resources for use with PTP */
- if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED)
- interface->sw_addr = ioremap(pci_resource_start(pdev, 4),
- pci_resource_len(pdev, 4));
- hw->sw_addr = interface->sw_addr;
-
/* initialize DCBNL interface */
fm10k_dcbnl_set_ops(netdev);
- /* Initialize service timer and service task */
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
- setup_timer(&interface->service_timer, &fm10k_service_timer,
- (unsigned long)interface);
- INIT_WORK(&interface->service_task, fm10k_service_task);
-
- /* kick off service timer now, even when interface is down */
- mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
-
- /* Intitialize timestamp data */
- fm10k_ts_init(interface);
-
/* set default ring sizes */
interface->tx_ring_count = FM10K_DEFAULT_TXD;
interface->rx_ring_count = FM10K_DEFAULT_RXD;
@@ -1987,6 +1916,12 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* the mbx interrupt might attempt to schedule the service task, so we
+ * must ensure it is disabled since we haven't yet requested the timer
+ * or work item.
+ */
+ set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+
err = fm10k_mbx_request_irq(interface);
if (err)
goto err_mbx_interrupt;
@@ -2006,8 +1941,15 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* stop all the transmit queues from transmitting until link is up */
netif_tx_stop_all_queues(netdev);
- /* Register PTP interface */
- fm10k_ptp_register(interface);
+ /* Initialize service timer and service task late in order to avoid
+ * cleanup issues.
+ */
+ setup_timer(&interface->service_timer, &fm10k_service_timer,
+ (unsigned long)interface);
+ INIT_WORK(&interface->service_task, fm10k_service_task);
+
+ /* kick off service timer now, even when interface is down */
+ mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
/* print warning for non-optimal configurations */
fm10k_slot_warn(interface);
@@ -2065,9 +2007,6 @@ static void fm10k_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- /* cleanup timestamp handling */
- fm10k_ptp_unregister(interface);
-
/* release VFs */
fm10k_iov_disable(pdev);
@@ -2140,9 +2079,6 @@ static int fm10k_resume(struct pci_dev *pdev)
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
- /* reset clock */
- fm10k_ts_reset(interface);
-
rtnl_lock();
err = fm10k_init_queueing_scheme(interface);
@@ -2259,15 +2195,17 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
+ rtnl_lock();
+
if (netif_running(netdev))
fm10k_close(netdev);
+ fm10k_mbx_free_irq(interface);
+
/* free interrupts */
fm10k_clear_queueing_scheme(interface);
- fm10k_mbx_free_irq(interface);
-
- pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -2337,27 +2275,31 @@ static void fm10k_io_resume(struct pci_dev *pdev)
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+ rtnl_lock();
+
err = fm10k_init_queueing_scheme(interface);
if (err) {
dev_err(&interface->pdev->dev,
"init_queueing_scheme failed: %d\n", err);
- return;
+ goto unlock;
}
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
- /* reset clock */
- fm10k_ts_reset(interface);
-
	if (netif_running(netdev))
		err = fm10k_open(netdev);
/* final check of hardware state before registering the interface */
err = err ? : fm10k_hw_ready(interface);
if (!err)
netif_device_attach(netdev);
+
+unlock:
+ rtnl_unlock();
}
static const struct pci_error_handlers fm10k_err_handler = {
@@ -2382,7 +2324,7 @@ static struct pci_driver fm10k_driver = {
/**
* fm10k_register_pci_driver - register driver interface
*
- * This funciton is called on module load in order to register the driver.
+ * This function is called on module load in order to register the driver.
**/
int fm10k_register_pci_driver(void)
{
@@ -2392,7 +2334,7 @@ int fm10k_register_pci_driver(void)
/**
* fm10k_unregister_pci_driver - unregister driver interface
*
- * This funciton is called on module unload in order to remove the driver.
+ * This function is called on module unload in order to remove the driver.
**/
void fm10k_unregister_pci_driver(void)
{
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 8cf943d..dc75507 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -219,8 +219,8 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
/* VLAN multi-bit write:
* The multi-bit write has several parts to it.
- * 3 2 1 0
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * 24 16 8 0
+ * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | RSVD0 | Length |C|RSVD0| VLAN ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -488,6 +488,10 @@ static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
if (!fm10k_glort_valid_pf(hw, glort))
return FM10K_ERR_PARAM;
+ /* reset multicast mode if deleting lport */
+ if (!enable)
+ fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
/* construct the lport message from the 2 pieces of data we have */
lport_msg = ((u32)count << 16) | glort;
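[Editor's note: the lport message itself is just the two values packed into one word, the logical-port count in the upper bits and the base glort in the low 16. Equivalently, with an invented helper name:]

	static inline u32 example_pack_lport_msg(u16 glort, u8 count)
	{
		return ((u32)count << 16) | glort;
	}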
@@ -527,8 +531,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
return FM10K_ERR_PARAM;
/* determine count of VSIs and queues */
- queue_count = 1 << (dglort->rss_l + dglort->pc_l);
- vsi_count = 1 << (dglort->vsi_l + dglort->queue_l);
+ queue_count = BIT(dglort->rss_l + dglort->pc_l);
+ vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
glort = dglort->glort;
q_idx = dglort->queue_b;
@@ -544,8 +548,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
}
/* determine count of PCs and queues */
- queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l);
- pc_count = 1 << dglort->pc_l;
+ queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
+ pc_count = BIT(dglort->pc_l);
/* configure PC for Tx queues */
for (pc = 0; pc < pc_count; pc++) {
@@ -711,8 +715,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
FM10K_RXDCTL_DROP_ON_EMPTY);
fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
- FM10K_RXQCTL_VF |
- (i << FM10K_RXQCTL_VF_SHIFT));
+ (i << FM10K_RXQCTL_VF_SHIFT) |
+ FM10K_RXQCTL_VF);
/* map queue pair to VF */
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
@@ -864,9 +868,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
- /* determine correct default VLAN ID */
+ /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+ * used here to indicate to the VF that it will not have privilege to
+ * write VLAN_TABLE. All policy is enforced on the PF but this allows
+ * the VF to correctly report errors to userspace requests.
+ */
if (vf_info->pf_vid)
- vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
else
vf_vid = vf_info->sw_vid;
@@ -952,7 +960,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
return FM10K_ERR_PARAM;
/* clear event notification of VF FLR */
- fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32));
+ fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
/* force timeout and then disconnect the mailbox */
vf_info->mbx.timeout = 0;
@@ -987,7 +995,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
(vf_idx << FM10K_TXQCTL_TC_SHIFT) |
FM10K_TXQCTL_VF | vf_idx;
- rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT);
+ rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
/* stop further DMA and reset queue ownership back to VF */
for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
@@ -1140,19 +1148,6 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
fm10k_update_hw_stats_q(hw, q, idx, qpp);
}
-static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
- struct fm10k_vf_info *vf_info,
- u64 timestamp)
-{
- u32 msg[4];
-
- /* generate port state response to notify VF it is not ready */
- fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
- fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp);
-
- return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
-}
-
/**
* fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
* @hw: Pointer to hardware structure
@@ -1384,7 +1379,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
/* if mode is not currently enabled, enable it */
- if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
+ if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
/* swap mode back to a bit flag */
@@ -1618,7 +1613,7 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
* @hw: pointer to hardware structure
* @switch_ready: pointer to boolean value that will record switch state
*
- * This funciton will check the DMA_CTRL2 register and mailbox in order
+ * This function will check the DMA_CTRL2 register and mailbox in order
* to determine if the switch is ready for the PF to begin requesting
* addresses and mapping traffic to the local interface.
**/
@@ -1647,6 +1642,8 @@ out:
/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
FM10K_TLV_ATTR_LAST
};
@@ -1787,89 +1784,6 @@ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
return 0;
}
-const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
- FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
- sizeof(struct fm10k_swapi_1588_timestamp)),
- FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
-/**
- * fm10k_adjust_systime_pf - Adjust systime frequency
- * @hw: pointer to hardware structure
- * @ppb: adjustment rate in parts per billion
- *
- * This function will adjust the SYSTIME_CFG register contained in BAR 4
- * if this function is supported for BAR 4 access. The adjustment amount
- * is based on the parts per billion value provided and adjusted to a
- * value based on parts per 2^48 clock cycles.
- *
- * If adjustment is not supported or the requested value is too large
- * we will return an error.
- **/
-static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
-{
- u64 systime_adjust;
-
- /* if sw_addr is not set we don't have switch register access */
- if (!hw->sw_addr)
- return ppb ? FM10K_ERR_PARAM : 0;
-
- /* we must convert the value from parts per billion to parts per
- * 2^48 cycles. In addition I have opted to only use the 30 most
- * significant bits of the adjustment value as the 8 least
- * significant bits are located in another register and represent
- * a value significantly less than a part per billion, the result
- * of dropping the 8 least significant bits is that the adjustment
- * value is effectively multiplied by 2^8 when we write it.
- *
- * As a result of all this the math for this breaks down as follows:
- * ppb / 10^9 == adjust * 2^8 / 2^48
- * If we solve this for adjust, and simplify it comes out as:
- * ppb * 2^31 / 5^9 == adjust
- */
- systime_adjust = (ppb < 0) ? -ppb : ppb;
- systime_adjust <<= 31;
- do_div(systime_adjust, 1953125);
-
- /* verify the requested adjustment value is in range */
- if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
- return FM10K_ERR_PARAM;
-
- if (ppb > 0)
- systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
-
- fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
-
- return 0;
-}
-
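
The arithmetic in the removed comment checks out: 10^9 == 2^9 * 5^9, and
dropping the 8 least significant register bits leaves parts per 2^40, so
adjust == ppb * 2^40 / 10^9 == ppb * 2^31 / 5^9. A standalone worked check
(plain C, outside the kernel), using the max_adj of 976562 ppb that the
removed fm10k_ptp.c below advertised:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ppb = 976562;				/* max_adj */
		uint64_t adjust = (ppb << 31) / 1953125;	/* 5^9 */

		/* prints adjust = 0x3ffffdda, just under the
		 * FM10K_SW_SYSTIME_ADJUST_MASK limit of 0x3FFFFFFF
		 */
		printf("adjust = 0x%llx\n", (unsigned long long)adjust);
		return 0;
	}
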
-/**
- * fm10k_read_systime_pf - Reads value of systime registers
- * @hw: pointer to the hardware structure
- *
- * Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanosecods. In order to guarantee the value is accurate
- * we check the 32 most significant bits both before and after reading the
- * 32 least significant bits to verify they didn't change as we were reading
- * the registers.
- **/
-static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
-{
- u32 systime_l, systime_h, systime_tmp;
-
- systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
-
- do {
- systime_tmp = systime_h;
- systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
- systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
- } while (systime_tmp != systime_h);
-
- return ((u64)systime_h << 32) | systime_l;
-}
-
static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1899,8 +1813,6 @@ static const struct fm10k_mac_ops mac_ops_pf = {
.set_dma_mask = fm10k_set_dma_mask_pf,
.get_fault = fm10k_get_fault_pf,
.get_host_state = fm10k_get_host_state_pf,
- .adjust_systime = fm10k_adjust_systime_pf,
- .read_systime = fm10k_read_systime_pf,
};
static const struct fm10k_iov_ops iov_ops_pf = {
@@ -1912,7 +1824,6 @@ static const struct fm10k_iov_ops iov_ops_pf = {
.set_lport = fm10k_iov_set_lport_pf,
.reset_lport = fm10k_iov_reset_lport_pf,
.update_stats = fm10k_iov_update_stats_pf,
- .report_timestamp = fm10k_iov_report_timestamp_pf,
};
static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index b2d96b4..3336d3c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -42,8 +42,6 @@ enum fm10k_pf_tlv_msg_id_v1 {
FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
- FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
- FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
};
enum fm10k_pf_tlv_attr_id_v1 {
@@ -61,7 +59,6 @@ enum fm10k_pf_tlv_attr_id_v1 {
FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
FM10K_PF_ATTR_ID_PORT = 0x0C,
FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
- FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
};
#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0
@@ -74,6 +71,8 @@ enum fm10k_pf_tlv_attr_id_v1 {
#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280
+
/* The following data structures are overlaid directly onto TLV mailbox
* messages, and must not break 4 byte alignment. Ensure the structures line
* up correctly as per their TLV definition.
@@ -100,13 +99,6 @@ struct fm10k_swapi_error {
struct fm10k_global_table_data ffu;
} __aligned(4) __packed;
-struct fm10k_swapi_1588_timestamp {
- __le64 egress;
- __le64 ingress;
- __le16 dglort;
- __le16 sglort;
-} __aligned(4) __packed;
-
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
@@ -122,11 +114,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
-extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
-#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
- FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
- fm10k_1588_timestamp_msg_attr, func)
-
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
deleted file mode 100644
index b4945e8..0000000
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
-
-#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
-
-#include "fm10k.h"
-
-#define FM10K_TS_TX_TIMEOUT (HZ * 15)
-
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
- struct skb_shared_hwtstamps *hwtstamp,
- u64 systime)
-{
- unsigned long flags;
-
- read_lock_irqsave(&interface->systime_lock, flags);
- systime += interface->ptp_adjust;
- read_unlock_irqrestore(&interface->systime_lock, flags);
-
- hwtstamp->hwtstamp = ns_to_ktime(systime);
-}
-
-static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface,
- __le16 dglort)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb;
-
- skb_queue_walk(list, skb) {
- if (FM10K_CB(skb)->fi.w.dglort == dglort)
- return skb;
- }
-
- return NULL;
-}
-
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *clone;
- unsigned long flags;
-
- /* create clone for us to return on the Tx path */
- clone = skb_clone_sk(skb);
- if (!clone)
- return;
-
- FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
- spin_lock_irqsave(&list->lock, flags);
-
- /* attempt to locate any buffers with the same dglort,
- * if none are present then insert skb in tail of list
- */
- skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
- if (!skb) {
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- __skb_queue_tail(list, clone);
- }
-
- spin_unlock_irqrestore(&list->lock, flags);
-
- /* if list is already has one then we just free the clone */
- if (skb)
- dev_kfree_skb(clone);
-}
-
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
- u64 systime)
-{
- struct skb_shared_hwtstamps shhwtstamps;
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb;
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
-
- /* attempt to locate and pull the sk_buff out of the list */
- skb = fm10k_ts_tx_skb(interface, dglort);
- if (skb)
- __skb_unlink(skb, list);
-
- spin_unlock_irqrestore(&list->lock, flags);
-
- /* if not found do nothing */
- if (!skb)
- return;
-
- /* timestamp the sk_buff and free out copy */
- fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
-}
-
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb, *tmp;
- unsigned long flags;
-
- /* If we're down or resetting, just bail */
- if (test_bit(__FM10K_DOWN, &interface->state) ||
- test_bit(__FM10K_RESETTING, &interface->state))
- return;
-
- spin_lock_irqsave(&list->lock, flags);
-
- /* walk though the list and flush any expired timestamp packets */
- skb_queue_walk_safe(list, skb, tmp) {
- if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout))
- continue;
- __skb_unlink(skb, list);
- kfree_skb(skb);
- interface->tx_hwtstamp_timeouts++;
- }
-
- spin_unlock_irqrestore(&list->lock, flags);
-}
-
-static u64 fm10k_systime_read(struct fm10k_intfc *interface)
-{
- struct fm10k_hw *hw = &interface->hw;
-
- return hw->mac.ops.read_systime(hw);
-}
-
-void fm10k_ts_reset(struct fm10k_intfc *interface)
-{
- s64 ns = ktime_to_ns(ktime_get_real());
- unsigned long flags;
-
- /* reinitialize the clock */
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust = fm10k_systime_read(interface) - ns;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-}
-
-void fm10k_ts_init(struct fm10k_intfc *interface)
-{
- /* Initialize lock protecting systime access */
- rwlock_init(&interface->systime_lock);
-
- /* Initialize skb queue for pending timestamp requests */
- skb_queue_head_init(&interface->ts_tx_skb_queue);
-
- /* reset the clock to current kernel time */
- fm10k_ts_reset(interface);
-}
-
-/**
- * fm10k_get_ts_config - get current hardware timestamping configuration
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * This function returns the current timestamping settings. Rather than
- * attempt to deconstruct registers to fill in the values, simply keep a copy
- * of the old settings around, and return a copy when requested.
- */
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct hwtstamp_config *config = &interface->ts_config;
-
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
-}
-
-/**
- * fm10k_set_ts_config - control hardware time stamping
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't cause any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- * Since hardware always timestamps Path delay packets when timestamping V2
- * packets, regardless of the type specified in the register, only use V2
- * Event mode. This more accurately tells the user what the hardware is going
- * to do anyways.
- */
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct hwtstamp_config ts_config;
-
- if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (ts_config.flags)
- return -EINVAL;
-
- switch (ts_config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
- case HWTSTAMP_TX_ON:
- /* we likely need some check here to see if this is supported */
- break;
- default:
- return -ERANGE;
- }
-
- switch (ts_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_ALL:
- interface->flags |= FM10K_FLAG_RX_TS_ENABLED;
- ts_config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- /* save these settings for future reference */
- interface->ts_config = ts_config;
-
- return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ?
- -EFAULT : 0;
-}
-
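
For context, the get/set paths being deleted were reached through the
standard SIOCSHWTSTAMP ioctl. A minimal userspace sketch of driving that
interface (interface name hypothetical, error handling elided):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int enable_hw_timestamping(int sock)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_ALL; /* driver coerced filters to ALL */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical */
		ifr.ifr_data = (char *)&cfg;

		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}
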
-static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
- struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
- int err;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
- hw = &interface->hw;
-
- err = hw->mac.ops.adjust_systime(hw, ppb);
-
- /* the only error we should see is if the value is out of range */
- return (err == FM10K_ERR_PARAM) ? -ERANGE : err;
-}
-
-static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust += delta;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-
- return 0;
-}
-
-static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
- u64 now;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- read_lock_irqsave(&interface->systime_lock, flags);
- now = fm10k_systime_read(interface) + interface->ptp_adjust;
- read_unlock_irqrestore(&interface->systime_lock, flags);
-
- *ts = ns_to_timespec64(now);
-
- return 0;
-}
-
-static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
- u64 ns = timespec64_to_ns(ts);
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust = fm10k_systime_read(interface) - ns;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-
- return 0;
-}
-
-static int fm10k_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int __always_unused on)
-{
- struct ptp_clock_time *t = &rq->perout.period;
- struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
- u64 period;
- u32 step;
-
- /* we can only support periodic output */
- if (rq->type != PTP_CLK_REQ_PEROUT)
- return -EINVAL;
-
- /* verify the requested channel is there */
- if (rq->perout.index >= ptp->n_per_out)
- return -EINVAL;
-
- /* we cannot enforce start time as there is no
- * mechanism for that in the hardware, we can only control
- * the period.
- */
-
- /* we cannot support periods greater than 4 seconds due to reg limit */
- if (t->sec > 4 || t->sec < 0)
- return -ERANGE;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
- hw = &interface->hw;
-
- /* we simply cannot support the operation if we don't have BAR4 */
- if (!hw->sw_addr)
- return -ENOTSUPP;
-
- /* convert to unsigned 64b ns, verify we can put it in a 32b register */
- period = t->sec * 1000000000LL + t->nsec;
-
- /* determine the minimum size for period */
- step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) &
- FM10K_SYSTIME_CFG_STEP_MASK);
-
- /* verify the value is in range supported by hardware */
- if ((period && (period < step)) || (period > U32_MAX))
- return -ERANGE;
-
- /* notify hardware of request to being sending pulses */
- fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index),
- (u32)period);
-
- return 0;
-}
-
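
The enable callback above was reached through the standard PTP character
device ABI. A hedged userspace sketch requesting a 1 Hz pulse on channel 0
(device path hypothetical):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int request_pulse(void)
	{
		struct ptp_perout_request req;
		int ret, fd = open("/dev/ptp0", O_RDWR);  /* hypothetical node */

		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.index = 0;		/* channel 0 == IEEE1588_PULSE0 below */
		req.period.sec = 1;	/* 1 Hz, within the 4 second limit above */
		req.period.nsec = 0;

		ret = ioctl(fd, PTP_PEROUT_REQUEST, &req);
		close(fd);
		return ret;
	}
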
-static struct ptp_pin_desc fm10k_ptp_pd[2] = {
- {
- .name = "IEEE1588_PULSE0",
- .index = 0,
- .func = PTP_PF_PEROUT,
- .chan = 0
- },
- {
- .name = "IEEE1588_PULSE1",
- .index = 1,
- .func = PTP_PF_PEROUT,
- .chan = 1
- }
-};
-
-static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
-{
- /* verify the requested pin is there */
- if (pin >= ptp->n_pins || !ptp->pin_config)
- return -EINVAL;
-
- /* enforce locked channels, no changing them */
- if (chan != ptp->pin_config[pin].chan)
- return -EINVAL;
-
- /* we want to keep the functions locked as well */
- if (func != ptp->pin_config[pin].func)
- return -EINVAL;
-
- return 0;
-}
-
-void fm10k_ptp_register(struct fm10k_intfc *interface)
-{
- struct ptp_clock_info *ptp_caps = &interface->ptp_caps;
- struct device *dev = &interface->pdev->dev;
- struct ptp_clock *ptp_clock;
-
- snprintf(ptp_caps->name, sizeof(ptp_caps->name),
- "%s", interface->netdev->name);
- ptp_caps->owner = THIS_MODULE;
- /* This math is simply the inverse of the math in
- * fm10k_adjust_systime_pf applied to an adjustment value
- * of 2^30 - 1 which is the maximum value of the register:
- * max_ppb == ((2^30 - 1) * 5^9) / 2^31
- */
- ptp_caps->max_adj = 976562;
- ptp_caps->adjfreq = fm10k_ptp_adjfreq;
- ptp_caps->adjtime = fm10k_ptp_adjtime;
- ptp_caps->gettime64 = fm10k_ptp_gettime;
- ptp_caps->settime64 = fm10k_ptp_settime;
-
- /* provide pins if BAR4 is accessible */
- if (interface->sw_addr) {
- /* enable periodic outputs */
- ptp_caps->n_per_out = 2;
- ptp_caps->enable = fm10k_ptp_enable;
-
- /* enable clock pins */
- ptp_caps->verify = fm10k_ptp_verify;
- ptp_caps->n_pins = 2;
- ptp_caps->pin_config = fm10k_ptp_pd;
- }
-
- ptp_clock = ptp_clock_register(ptp_caps, dev);
- if (IS_ERR(ptp_clock)) {
- ptp_clock = NULL;
- dev_err(dev, "ptp_clock_register failed\n");
- } else {
- dev_info(dev, "registered PHC device %s\n", ptp_caps->name);
- }
-
- interface->ptp_clock = ptp_clock;
-}
-
-void fm10k_ptp_unregister(struct fm10k_intfc *interface)
-{
- struct ptp_clock *ptp_clock = interface->ptp_clock;
- struct device *dev = &interface->pdev->dev;
-
- if (!ptp_clock)
- return;
-
- interface->ptp_clock = NULL;
-
- ptp_clock_unregister(ptp_clock);
- dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name);
-}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index ab01bb3..f8e87bf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -222,7 +222,7 @@ s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
if (len < 4) {
- attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1);
+ attr[1] = (u32)value & (BIT(8 * len) - 1);
} else {
attr[1] = (u32)value;
if (len > 4)
@@ -481,7 +481,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr,
* up into an array of pointers stored in results. The function will
* return FM10K_ERR_PARAM on any input or message error,
* FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
- * and 0 on success.
+ * and 0 on success. Any attributes not found in tlv_attr will be silently
+ * ignored.
**/
static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
@@ -518,14 +519,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
while (offset < len) {
attr_id = *attr & FM10K_TLV_ID_MASK;
- if (attr_id < FM10K_TLV_RESULTS_MAX)
- err = fm10k_tlv_attr_validate(attr, tlv_attr);
- else
- err = FM10K_NOT_IMPLEMENTED;
+ if (attr_id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_NOT_IMPLEMENTED;
- if (err < 0)
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ if (err == FM10K_NOT_IMPLEMENTED)
+ ; /* silently ignore non-implemented attributes */
+ else if (err)
return err;
- if (!err)
+ else
results[attr_id] = attr;
/* update offset */
@@ -652,29 +654,29 @@ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
**/
static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
{
- if (attr_flags & (1 << FM10K_TEST_MSG_STRING))
+ if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
test_str);
- if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR))
+ if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
test_mac, test_vlan);
- if (attr_flags & (1 << FM10K_TEST_MSG_U8))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U8))
fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
- if (attr_flags & (1 << FM10K_TEST_MSG_U16))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U16))
fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
- if (attr_flags & (1 << FM10K_TEST_MSG_U32))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U32))
fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
- if (attr_flags & (1 << FM10K_TEST_MSG_U64))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U64))
fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
- if (attr_flags & (1 << FM10K_TEST_MSG_S8))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S8))
fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
- if (attr_flags & (1 << FM10K_TEST_MSG_S16))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S16))
fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
- if (attr_flags & (1 << FM10K_TEST_MSG_S32))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S32))
fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
- if (attr_flags & (1 << FM10K_TEST_MSG_S64))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S64))
fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
- if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT))
+ if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
test_le, 8);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index e1845e0..a1f1027 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 854ebb1..b8bc061 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -225,11 +225,6 @@ struct fm10k_hw;
#define FM10K_STATS_LOOPBACK_DROP 0x3806
#define FM10K_STATS_NODESC_DROP 0x3807
-/* Timesync registers */
-#define FM10K_SYSTIME 0x3814
-#define FM10K_SYSTIME_CFG 0x3818
-#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F
-
/* PCIe state registers */
#define FM10K_PHYADDR 0x381C
@@ -355,6 +350,7 @@ struct fm10k_hw;
#define FM10K_VLAN_TABLE_VSI_MAX 64
#define FM10K_VLAN_LENGTH_SHIFT 16
#define FM10K_VLAN_CLEAR BIT(15)
+#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR
#define FM10K_VLAN_ALL \
((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
@@ -381,12 +377,6 @@ struct fm10k_hw;
#define FM10K_VFSYSTIME 0x00040
#define FM10K_VFITR(_n) ((_n) + 0x00060)
-/* Registers contained in BAR 4 for Switch management */
-#define FM10K_SW_SYSTIME_ADJUST 0x0224D
-#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
-#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
-
enum fm10k_int_source {
fm10k_int_mailbox = 0,
fm10k_int_pcie_fault = 1,
@@ -550,8 +540,6 @@ struct fm10k_mac_ops {
struct fm10k_dglort_cfg *);
void (*set_dma_mask)(struct fm10k_hw *, u64);
s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
- s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
- u64 (*read_systime)(struct fm10k_hw *);
};
enum fm10k_mac_type {
@@ -617,10 +605,10 @@ struct fm10k_vf_info {
*/
};
-#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI)
-#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI)
-#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC)
-#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
+#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI))
+#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC))
+#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE))
#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
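
These macros split the VF's 8-bit vf_flags into two nibbles: bits 3:0
record which xcast modes the VF may use, bits 7:4 record the currently
enabled mode, one bit per FM10K_XCAST_MODE_* value (assumed here to lie in
0..3). A hedged illustration:

	u8 vf_flags = FM10K_VF_FLAG_MULTI_CAPABLE |	   /* capability nibble */
		      FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI); /* enabled */
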
@@ -643,7 +631,6 @@ struct fm10k_iov_ops {
s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
- s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64);
};
struct fm10k_iov_info {
@@ -667,7 +654,6 @@ struct fm10k_info {
struct fm10k_hw {
u32 __iomem *hw_addr;
- u32 __iomem *sw_addr;
void *back;
struct fm10k_mac_info mac;
struct fm10k_bus_info bus;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 91f8d73..3b06685e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -188,7 +188,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
if (vsi)
return FM10K_ERR_PARAM;
- /* verify upper 4 bits of vid and length are 0 */
+ /* clever trick to verify reserved bits in both vid and length */
if ((vid << 16 | vid) >> 28)
return FM10K_ERR_PARAM;
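
The trick works because the VLAN ID occupies bits 11:0 of the low half-word
and the length occupies bits 27:16, so OR-ing a left-shifted copy lines both
reserved nibbles (bits 15:12 and 31:28) up under a single >> 28 test. A
worked check:

	u32 vid = 0x0FFF0FFF;		/* length 0xFFF, VLAN ID 0xFFF */

	/* (0x0FFF0FFF << 16 | 0x0FFF0FFF) >> 28 == 0x0FFF0FFF >> 28 == 0,
	 * so the value is accepted; any bit set in 31:28 or 15:12 would
	 * leave the result nonzero and trigger FM10K_ERR_PARAM.
	 */
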
@@ -228,7 +228,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
ether_addr_copy(hw->mac.perm_addr, perm_addr);
hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
- hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
return 0;
}
@@ -451,13 +451,6 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
-const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
- FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP),
- FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
/**
* fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
* @hw: pointer to hardware structure
@@ -509,52 +502,6 @@ static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
return 0;
}
-/**
- * fm10k_adjust_systime_vf - Adjust systime frequency
- * @hw: pointer to hardware structure
- * @ppb: adjustment rate in parts per billion
- *
- * This function takes an adjustment rate in parts per billion and will
- * verify that this value is 0 as the VF cannot support adjusting the
- * systime clock.
- *
- * If the ppb value is non-zero the return is ERR_PARAM else success
- **/
-static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
-{
- /* The VF cannot adjust the clock frequency, however it should
- * already have a syntonic clock with whichever host interface is
- * running as the master for the host interface clock domain so
- * there should be not frequency adjustment necessary.
- */
- return ppb ? FM10K_ERR_PARAM : 0;
-}
-
-/**
- * fm10k_read_systime_vf - Reads value of systime registers
- * @hw: pointer to the hardware structure
- *
- * Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanoseconds. In order to guarantee the value is accurate
- * we check the 32 most significant bits both before and after reading the
- * 32 least significant bits to verify they didn't change as we were reading
- * the registers.
- **/
-static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
-{
- u32 systime_l, systime_h, systime_tmp;
-
- systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
-
- do {
- systime_tmp = systime_h;
- systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
- systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
- } while (systime_tmp != systime_h);
-
- return ((u64)systime_h << 32) | systime_l;
-}
-
static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
@@ -579,8 +526,6 @@ static const struct fm10k_mac_ops mac_ops_vf = {
.rebind_hw_stats = fm10k_rebind_hw_stats_vf,
.configure_dglort_map = fm10k_configure_dglort_map_vf,
.get_host_state = fm10k_get_host_state_generic,
- .adjust_systime = fm10k_adjust_systime_vf,
- .read_systime = fm10k_read_systime_vf,
};
static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index c4439f1..2662f33 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@ enum fm10k_vf_tlv_msg_id {
FM10K_VF_MSG_ID_MSIX,
FM10K_VF_MSG_ID_MAC_VLAN,
FM10K_VF_MSG_ID_LPORT_STATE,
- FM10K_VF_MSG_ID_1588,
FM10K_VF_MSG_ID_MAX,
};
@@ -49,11 +48,6 @@ enum fm10k_tlv_lport_state_attr_id {
FM10K_LPORT_STATE_MSG_MAX
};
-enum fm10k_tlv_1588_attr_id {
- FM10K_1588_MSG_TIMESTAMP,
- FM10K_1588_MSG_MAX
-};
-
#define FM10K_VF_MSG_MSIX_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
@@ -70,9 +64,5 @@ extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
fm10k_lport_state_msg_attr, func)
-extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
-#define FM10K_VF_MSG_1588_HANDLER(func) \
- FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
-
extern const struct fm10k_info fm10k_vf_info;
#endif /* _FM10K_VF_H */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ce6e9c..9c44739 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -97,12 +97,12 @@
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
-#define I40E_PRIV_FLAGS_PS BIT(4)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5)
+#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
+#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4)
+#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -112,7 +112,9 @@
#define I40E_OEM_VER_PATCH_MASK 0xff
#define I40E_OEM_VER_BUILD_SHIFT 8
#define I40E_OEM_VER_SHIFT 24
-#define I40E_PHY_DEBUG_PORT BIT(4)
+#define I40E_PHY_DEBUG_ALL \
+ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
+ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
/* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -123,10 +125,7 @@
#define XSTRINGIFY(bar) STRINGIFY(bar)
#define I40E_RX_DESC(R, i) \
- ((ring_is_16byte_desc_enabled(R)) \
- ? (union i40e_32byte_rx_desc *) \
- (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
- : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
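
With packet split and 16-byte descriptor support gone, the lookup is an
unconditional 32-byte descriptor index; call sites are unchanged (ring and
index names here are assumptions):

	union i40e_32byte_rx_desc *rx_desc = I40E_RX_DESC(rx_ring, ntc);
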
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
@@ -202,6 +201,7 @@ struct i40e_lump_tracking {
#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
@@ -244,7 +244,6 @@ struct i40e_fdir_filter {
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
-#define I40E_MAX_USER_PRIORITY 8
/* DCB per TC information data structure */
struct i40e_tc_info {
u16 qoffset; /* Queue offset from base queue */
@@ -320,8 +319,6 @@ struct i40e_pf {
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
-#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
-#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
@@ -330,7 +327,6 @@ struct i40e_pf {
#ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
@@ -363,6 +359,7 @@ struct i40e_pf {
#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
#define I40E_FLAG_PF_MAC BIT_ULL(50)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -534,9 +531,7 @@ struct i40e_vsi {
u8 *rss_lut_user; /* User configured lookup table entries */
u16 max_frame;
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
/* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors;
@@ -554,7 +549,7 @@ struct i40e_vsi {
u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
- u16 vf_id; /* Virtual function ID for SRIOV VSIs */
+ s16 vf_id; /* Virtual function ID for SRIOV VSIs */
struct i40e_tc_configuration tc_config;
struct i40e_aqc_vsi_properties_data info;
@@ -811,6 +806,7 @@ int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -823,7 +819,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
-int i40e_close(struct net_device *netdev);
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
void i40e_netpoll(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index df8e2fd..738b42a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,16 +33,6 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
- return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
- (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
-}
-
-/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -624,13 +614,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_release_on_done = false;
+ hw->nvm_release_on_done = false;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- ret_code = i40e_aq_set_hmc_resource_profile(hw,
- I40E_HMC_PROFILE_DEFAULT,
- 0,
- NULL);
ret_code = 0;
/* success! */
@@ -1023,26 +1009,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- if (i40e_is_nvm_update_op(&e->desc)) {
- if (hw->aq.nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->aq.nvm_release_on_done = false;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
- }
-
+ i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 12fbbdd..d92aad3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 8d5c65a..11cf1a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -429,6 +425,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -1585,27 +1582,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1652,11 +1628,11 @@ enum i40e_aq_phy_type {
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
};
struct i40e_aqc_module_desc {
@@ -1857,7 +1833,10 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
u8 reserved[15];
};
@@ -1927,9 +1906,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2226,13 +2205,11 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
*/
struct i40e_aqc_lldp_set_local_mib {
#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
- SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
- SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
+ BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
@@ -2250,7 +2227,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
struct i40e_aqc_lldp_stop_start_specific_agent {
#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
- (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
@@ -2303,7 +2280,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2323,14 +2300,13 @@ struct i40e_aqc_get_set_rss_key_data {
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index bf6b453..a4601d9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -217,7 +217,7 @@ struct i40e_client {
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
enum i40e_client_type type;
- struct i40e_client_ops *ops; /* client ops provided by the client */
+ const struct i40e_client_ops *ops; /* client ops provided by the client */
};
static inline bool i40e_client_is_registered(struct i40e_client *client)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4596294..422b41d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -60,6 +60,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
default:
@@ -694,7 +696,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -1901,13 +1903,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
*
* Reset the external PHY.
**/
-enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
- enum i40e_status_code status;
+ i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
@@ -1970,10 +1972,12 @@ aq_add_vsi_exit:
* @seid: vsi number
* @set: set unicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -1986,8 +1990,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
@@ -2037,6 +2042,76 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
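
A hedged call-site sketch for the new helper (the seid and device plumbing
is assumed from the surrounding driver, not shown in this patch), enabling
unicast promiscuous capture of VLAN 100 on one VSI:

	enum i40e_status_code ret;

	ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, true, 100, NULL);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "set unicast promisc on VLAN failed, err %d\n", ret);
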
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2157,6 +2232,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -2168,6 +2246,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
return status;
}
@@ -2205,6 +2286,35 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_switch_config *scfg =
+ (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_switch_config);
+ scfg->flags = cpu_to_le16(flags);
+ scfg->valid_flags = cpu_to_le16(valid_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
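The flags/valid_flags pair behaves like a masked write: only the bits selected by valid_flags are applied, so unrelated switch settings stay untouched. A small model of that assumed firmware semantic:

/* Model of the masked-write semantics assumed for flags/valid_flags:
 * result = (current & ~valid_flags) | (flags & valid_flags)
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t apply_switch_config(uint16_t current,
				    uint16_t flags, uint16_t valid_flags)
{
	return (uint16_t)((current & ~valid_flags) | (flags & valid_flags));
}

int main(void)
{
	uint16_t cfg = 0x0024;          /* some pre-existing settings */
	uint16_t promisc_bit = 0x0001;  /* assumed stand-in for
					 * I40E_AQ_SET_SWITCH_CFG_PROMISC */

	/* set the bit without disturbing the other settings */
	cfg = apply_switch_config(cfg, promisc_bit, promisc_bit);
	printf("after set:   0x%04x\n", (unsigned)cfg);

	/* clear it again: flags=0 but the bit is still marked valid */
	cfg = apply_switch_config(cfg, 0, promisc_bit);
	printf("after clear: 0x%04x\n", (unsigned)cfg);
	return 0;
}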
+
+/**
* i40e_aq_get_firmware_version
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
@@ -2660,10 +2770,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 *rules_used, u16 *rules_free)
{
/* Rule ID must be valid except for rule_type INGRESS VLAN mirroring */
- if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- if (!rule_id)
- return I40E_ERR_PARAM;
- } else {
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* count and mr_list shall be valid for rule_type INGRESS VLAN
* mirroring. For other rule_type values, count and mr_list should
* not matter.
@@ -2780,36 +2887,6 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *cmd =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_hmc_resource_profile);
-
- cmd->pm_profile = (u8)profile;
- cmd->pe_vf_enabled = pe_vf_enabled_count;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_request_resource
* @hw: pointer to the hw struct
* @resource: resource id
@@ -3073,6 +3150,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MSIX:
p->num_msix_vectors = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX vector count = %d\n",
+ p->num_msix_vectors);
break;
case I40E_AQ_CAP_ID_VF_MSIX:
p->num_msix_vectors_vf = number;
@@ -3128,6 +3208,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = true;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = true;
+ break;
default:
break;
}
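The new NVM_MGMT case follows the same decode pattern as the other capability IDs: selected bits of the reported number are folded into booleans. A stand-alone sketch, with assumed bit positions standing in for the I40E_NVM_MGMT_* defines:

/* Sketch of the capability-bit decode above; bit positions are assumed
 * stand-ins for I40E_NVM_MGMT_SEC_REV_DISABLED and _UPDATE_DISABLED.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVM_MGMT_SEC_REV_DISABLED 0x1  /* assumed bit */
#define NVM_MGMT_UPDATE_DISABLED  0x2  /* assumed bit */

struct caps {
	bool sec_rev_disabled;
	bool update_disabled;
};

static void parse_nvm_mgmt(struct caps *p, uint32_t number)
{
	/* like the driver, only set the booleans when the bit is present */
	if (number & NVM_MGMT_SEC_REV_DISABLED)
		p->sec_rev_disabled = true;
	if (number & NVM_MGMT_UPDATE_DISABLED)
		p->update_disabled = true;
}

int main(void)
{
	struct caps c = { 0 };

	parse_nvm_mgmt(&c, 0x3);
	printf("sec_rev_disabled=%d update_disabled=%d\n",
	       c.sec_rev_disabled, c.update_disabled);
	return 0;
}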
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 0c97733..e6af8c8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
- if (vsi->active_vlans)
- dev_info(&pf->pdev->dev,
- " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev,
+ " vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev,
" state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->state, vsi->flags,
@@ -269,13 +268,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->queue_index,
rx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, rx_ring->rx_hdr_len,
- rx_ring->rx_buf_len,
- rx_ring->dtype);
+ " rx_rings[%i]: rx_buf_len = %d\n",
+ i, rx_ring->rx_buf_len);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, ring_is_ps_enabled(rx_ring),
+ " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
rx_ring->next_to_use,
rx_ring->next_to_clean,
rx_ring->ring_active);
@@ -327,9 +324,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->queue_index,
tx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, tx_ring->dtype);
- dev_info(&pf->pdev->dev,
" tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
i,
tx_ring->next_to_use,
@@ -366,8 +360,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" work_limit = %d\n",
vsi->work_limit);
dev_info(&pf->pdev->dev,
- " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
- vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+ " max_frame = %d, rx_buf_len = %d dtype = %d\n",
+ vsi->max_frame, vsi->rx_buf_len, 0);
dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector);
@@ -592,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
" d[%03x] = 0x%016llx 0x%016llx\n",
i, txd->buffer_addr,
txd->cmd_type_offset_bsz);
- } else if (sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(ring, i);
- dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx\n",
- i, rxd->read.pkt_addr,
- rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
@@ -620,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
txd->buffer_addr, txd->cmd_type_offset_bsz);
- } else if (sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(ring, desc_n);
- dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
- vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 99257fc..d701861 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -44,6 +44,8 @@
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 784b165..5e8d84f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -230,12 +230,22 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+ "MFP",
+ "LinkPolling",
+ "flow-director-atr",
+ "veb-stats",
+ "hw-atr-eviction",
+ "vf-true-promisc-support",
+};
+
+#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+
static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"NPAR",
"LinkPolling",
"flow-director-atr",
"veb-stats",
- "packet-split",
"hw-atr-eviction",
};
@@ -252,6 +262,110 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
}
/**
+ * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @pf: pointer to the PF struct whose PHY capabilities are converted
+ * @supported: pointer to the ethtool supported variable to fill in
+ * @advertising: pointer to the ethtool advertising variable to fill in
+ *
+ **/
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
+ u32 *advertising)
+{
+ enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
+
+ *supported = 0x0;
+ *advertising = 0x0;
+
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+ *supported |= SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_100baseT_Full;
+ }
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XFI ||
+ phy_types & I40E_CAP_PHY_TYPE_SFI ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+ *supported |= SUPPORTED_10000baseT_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+ *supported |= SUPPORTED_40000baseCR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_40000baseCR4_Full;
+ }
+ if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+ !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_100baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ *supported |= SUPPORTED_40000baseSR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ *supported |= SUPPORTED_40000baseLR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+ *supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+ *supported |= SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+ *supported |= SUPPORTED_10000baseKR_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_10000baseKR_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ *supported |= SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+ *supported |= SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
+ }
+}
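This function is now the single source of the phy_types-to-ethtool translation; the link-up path below intersects its per-PHY masks with this NVM-derived view. A reduced stand-alone model of the mapping and the later AND, using illustrative bit values rather than the real capability and SUPPORTED_* constants:

/* Reduced model of the phy_types -> ethtool mapping and the later
 * "supported &= nvm_supported" intersection. Bit values are assumed
 * stand-ins, not the real capability or SUPPORTED_ constants.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_PHY_SGMII  0x1   /* assumed */
#define CAP_PHY_10G_T  0x2   /* assumed */

#define SUP_AUTONEG    0x01  /* assumed */
#define SUP_1000_FULL  0x02  /* assumed */
#define SUP_10000_FULL 0x04  /* assumed */

static uint32_t phy_types_to_ethtool(uint32_t phy_types)
{
	uint32_t supported = 0;

	if (phy_types & CAP_PHY_SGMII)
		supported |= SUP_AUTONEG | SUP_1000_FULL;
	if (phy_types & CAP_PHY_10G_T)
		supported |= SUP_AUTONEG | SUP_10000_FULL;
	return supported;
}

int main(void)
{
	uint32_t link_up_view = SUP_AUTONEG | SUP_1000_FULL | SUP_10000_FULL;
	uint32_t nvm_view = phy_types_to_ethtool(CAP_PHY_SGMII);

	/* what is truly supported is the intersection of the two views */
	printf("supported = 0x%02x\n", (unsigned)(link_up_view & nvm_view));
	return 0;
}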
+
+/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
* @ecmd: ethtool command to fill in
@@ -265,6 +379,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
{
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
u32 link_speed = hw_link_info->link_speed;
+ u32 e_advertising = 0x0;
+ u32 e_supported = 0x0;
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
@@ -305,14 +421,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
ecmd->supported = SUPPORTED_Autoneg |
@@ -320,12 +440,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ecmd->advertising = ADVERTISED_Autoneg |
ADVERTISED_1000baseT_Full;
break;
- case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
ecmd->supported = SUPPORTED_Autoneg |
@@ -352,14 +466,23 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ecmd->advertising |= ADVERTISED_100baseT_Full;
}
break;
- /* Backplane is set based on supported phy types in get_settings
- * so don't set anything here but don't warn either
- */
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
+ ecmd->supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_10000baseKR_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
break;
default:
/* if we got here and link is up something bad is afoot */
@@ -367,6 +490,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
hw_link_info->phy_type);
}
+ /* Now that we've worked out everything that could be supported by the
+ * current PHY type, AND it with what the NVM reports as supported to
+ * get what is truly supported
+ */
+ i40e_phy_type_to_ethtool(pf, &e_supported,
+ &e_advertising);
+
+ ecmd->supported = ecmd->supported & e_supported;
+ ecmd->advertising = ecmd->advertising & e_advertising;
+
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
@@ -401,74 +534,11 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
struct ethtool_cmd *ecmd,
struct i40e_pf *pf)
{
- enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
-
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- ecmd->supported = 0x0;
- ecmd->advertising = 0x0;
- if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- if (pf->hw.mac.type == I40E_MAC_X722) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
- }
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XFI ||
- phy_types & I40E_CAP_PHY_TYPE_SFI ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
- ecmd->supported |= SUPPORTED_10000baseT_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
- ecmd->supported |= SUPPORTED_40000baseCR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
- }
- if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
- !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_100baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
- ecmd->supported |= SUPPORTED_40000baseSR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
- ecmd->supported |= SUPPORTED_40000baseLR4_Full;
+ i40e_phy_type_to_ethtool(pf, &ecmd->supported,
+ &ecmd->advertising);
/* With no link, speed and duplex are unknown */
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -497,38 +567,6 @@ static int i40e_get_settings(struct net_device *netdev,
i40e_get_settings_link_down(hw, ecmd, pf);
/* Now set the settings that don't rely on link being up/down */
-
- /* For backplane, supported and advertised are only reliant on the
- * phy types the NVM specifies are supported.
- */
- if (hw->device_id == I40E_DEV_ID_KX_B ||
- hw->device_id == I40E_DEV_ID_KX_C ||
- hw->device_id == I40E_DEV_ID_20G_KR2 ||
- hw->device_id == I40E_DEV_ID_20G_KR2_A) {
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
- ecmd->supported |= SUPPORTED_40000baseKR4_Full;
- ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
- ecmd->supported |= SUPPORTED_20000baseKR2_Full;
- ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
- ecmd->supported |= SUPPORTED_10000baseKR_Full;
- ecmd->advertising |= ADVERTISED_10000baseKR_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
- ecmd->supported |= SUPPORTED_10000baseKX4_Full;
- ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
- ecmd->supported |= SUPPORTED_1000baseKX_Full;
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
- }
- }
-
/* Set autoneg settings */
ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -1143,6 +1181,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ if (pf->hw.pf_id == 0)
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
+ else
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1259,6 +1301,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* this is to allow wr32 to have something to write to
+ * during early allocation of Rx buffers
+ */
+ u32 __iomem faketail = 0;
+ struct i40e_ring *ring;
+ u16 unused;
+
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
@@ -1267,12 +1316,22 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
+ rx_rings[i].tail = (u8 __iomem *)&faketail;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
+
+ /* now allocate the Rx buffers to make sure the OS
+ * has enough memory; any failure here means we abort
+ */
+ ring = &rx_rings[i];
+ unused = I40E_DESC_UNUSED(ring);
+ err = i40e_alloc_rx_buffers(ring, unused);
+rx_unwind:
if (err) {
- while (i) {
- i--;
+ do {
i40e_free_rx_resources(&rx_rings[i]);
- }
+ } while (i--);
kfree(rx_rings);
rx_rings = NULL;
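The loop change matters: the do/while form also frees index i itself, the ring whose buffer allocation just failed part-way, where the old while form stopped one short. A stand-alone comparison:

/* Model of the unwind-loop change in i40e_set_ringparam: the do/while
 * form also cleans entry i itself (the one that failed part-way),
 * where the old while form stopped one short of it.
 */
#include <stdio.h>

static void free_ring(int idx)
{
	printf("freeing ring %d\n", idx);
}

int main(void)
{
	int i = 2;	/* allocation failed while setting up ring 2 */
	int j = 2;

	/* old form: frees rings 1 and 0, leaks ring 2's partial state */
	while (i) {
		i--;
		free_ring(i);
	}

	printf("---\n");

	/* new form: frees rings 2, 1 and 0 */
	do {
		free_ring(j);
	} while (j--);

	return 0;
}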
@@ -1298,6 +1357,17 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_free_rx_resources(vsi->rx_rings[i]);
+ /* get the real tail offset */
+ rx_rings[i].tail = vsi->rx_rings[i]->tail;
+ /* this is to fake out the allocation routine
+ * into thinking it has to realloc everything
+ * but the recycling logic will let us re-use
+ * the buffers allocated above
+ */
+ rx_rings[i].next_to_use = 0;
+ rx_rings[i].next_to_clean = 0;
+ rx_rings[i].next_to_alloc = 0;
+ /* do a struct copy */
*vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
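The fake-tail trick is a plain pointer swap: while the clones are staged, ring->tail points at a dummy word on the stack so tail writes during buffer allocation land somewhere harmless, and only the committed copy gets the real register address back. A sketch of the pattern, with a volatile variable standing in for the MMIO register:

/* Sketch of the fake-tail pattern: point the tail at a local dummy
 * while staging, then restore the real target before committing.
 */
#include <stdint.h>
#include <stdio.h>

struct ring {
	volatile uint32_t *tail;   /* normally an MMIO register */
	uint32_t next_to_use;
};

static void release_rx_desc(struct ring *r, uint32_t val)
{
	r->next_to_use = val;
	*r->tail = val;            /* would be writel() on real hardware */
}

int main(void)
{
	volatile uint32_t hw_tail_reg = 0;  /* stand-in for the MMIO tail */
	uint32_t faketail = 0;
	struct ring staged = { .tail = &faketail };

	release_rx_desc(&staged, 63);       /* harmless: hits the dummy */

	staged.tail = &hw_tail_reg;         /* restore the real tail */
	release_rx_desc(&staged, 63);

	printf("fake=%u hw=%u\n", (unsigned)faketail, (unsigned)hw_tail_reg);
	return 0;
}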
@@ -1342,7 +1412,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
return I40E_VSI_STATS_LEN(netdev);
}
case ETH_SS_PRIV_FLAGS:
- return I40E_PRIV_FLAGS_STR_LEN;
+ if (pf->hw.pf_id == 0)
+ return I40E_PRIV_FLAGS_GL_STR_LEN;
+ else
+ return I40E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
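Whatever get_sset_count reports must match what get_strings later copies, so both sites key off the same pf_id test. A small model of keeping the two paths in lockstep through one selector (table contents copied from the strings above, sizes derived from them):

/* Model of keeping the count and string-table choice in lockstep:
 * one selector used by both the count path and the strings path.
 */
#include <stdio.h>

static const char *flags_gl[] = { "MFP", "LinkPolling", "flow-director-atr",
				  "veb-stats", "hw-atr-eviction",
				  "vf-true-promisc-support" };
static const char *flags_fn[] = { "NPAR", "LinkPolling", "flow-director-atr",
				  "veb-stats", "hw-atr-eviction" };

static const char **select_flags(int pf_id, int *count)
{
	if (pf_id == 0) {
		*count = sizeof(flags_gl) / sizeof(flags_gl[0]);
		return flags_gl;
	}
	*count = sizeof(flags_fn) / sizeof(flags_fn[0]);
	return flags_fn;
}

int main(void)
{
	int n, i;
	const char **tbl = select_flags(0, &n);

	for (i = 0; i < n; i++)
		printf("%s\n", tbl[i]);
	return 0;
}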
@@ -1540,10 +1613,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40e_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ if (pf->hw.pf_id == 0) {
+ for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings_gl[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ } else {
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
}
break;
default:
@@ -1714,7 +1795,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* If the device is online then take it offline */
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ i40e_close(netdev);
else
/* This reset does not affect link - if it is
* changed to a type of reset that does affect
@@ -1743,7 +1824,7 @@ static void i40e_diag_test(struct net_device *netdev,
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
if (if_running)
- dev_open(netdev);
+ i40e_open(netdev);
} else {
/* Online tests */
netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1837,7 +1918,7 @@ static int i40e_set_phys_id(struct net_device *netdev,
if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
pf->led_status = i40e_led_get(hw);
} else {
- i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
ret = i40e_led_get_phy(hw, &temp_status,
&pf->phy_led_val);
pf->led_status = temp_status;
@@ -2490,7 +2571,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!vsi)
return -EINVAL;
-
pf = vsi->back;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
@@ -2548,15 +2628,18 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
if (ntohl(fsp->m_ext.data[1])) {
- if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
- netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+ vf_id = ntohl(fsp->h_ext.data[1]);
+ if (vf_id >= pf->num_alloc_vfs) {
+ netif_info(pf, drv, vsi->netdev,
+ "Invalid VF id %d\n", vf_id);
goto free_input;
}
- vf_id = ntohl(fsp->h_ext.data[1]);
/* Find vsi id from vf id and override dest vsi */
input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
- netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+ netif_info(pf, drv, vsi->netdev,
+ "Invalid queue id %d for VF %d\n",
+ input->q_index, vf_id);
goto free_input;
}
}
@@ -2803,18 +2886,18 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
struct i40e_pf *pf = vsi->back;
u32 ret_flags = 0;
- ret_flags |= pf->hw.func_caps.npar_enable ?
- I40E_PRIV_FLAGS_NPAR_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
I40E_PRIV_FLAGS_FD_ATR : 0;
ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
I40E_PRIV_FLAGS_VEB_STATS : 0;
- ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
- I40E_PRIV_FLAGS_PS : 0;
ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
+ if (pf->hw.pf_id == 0) {
+ ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
+ I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+ }
return ret_flags;
}
@@ -2829,27 +2912,13 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ u16 sw_flags = 0, valid_flags = 0;
bool reset_required = false;
+ bool promisc_change = false;
+ int ret;
/* NOTE: MFP is not settable */
- /* allow the user to control the method of receive
- * buffer DMA, whether the packet is split at header
- * boundaries into two separate buffers. In some cases
- * one routine or the other will perform better.
- */
- if ((flags & I40E_PRIV_FLAGS_PS) &&
- !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
- pf->flags |= I40E_FLAG_RX_PS_ENABLED;
- pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
- reset_required = true;
- } else if (!(flags & I40E_PRIV_FLAGS_PS) &&
- (pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
- pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
- pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
- reset_required = true;
- }
-
if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
else
@@ -2876,6 +2945,33 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
reset_required = true;
}
+ if (pf->hw.pf_id == 0) {
+ if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
+ promisc_change = true;
+ } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+ (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
+ promisc_change = true;
+ }
+ }
+ if (promisc_change) {
+ if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
(pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 8ad162c..58e6c15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,16 +38,6 @@
#include "i40e_fcoe.h"
/**
- * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
- * @ptype: the packet type field from rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
-/**
* i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
* @sof: the FCoE start of frame delimiter
**/
@@ -1371,7 +1361,7 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 5ebe12d..a7c7b1d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -49,7 +49,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
- i40e_status ret_code;
+ i40e_status ret_code = I40E_SUCCESS;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3449129..1cd0ebf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -45,8 +45,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -90,6 +90,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
/* required last entry */
@@ -326,7 +328,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
unsigned long trans_start;
q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start ? : netdev->trans_start;
+ trans_start = q->trans_start;
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + netdev->watchdog_timeo))) {
@@ -396,24 +398,6 @@ static void i40e_tx_timeout(struct net_device *netdev)
}
/**
- * i40e_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- **/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
-{
- rx_ring->next_to_use = val;
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(val, rx_ring->tail);
-}
-
-/**
* i40e_get_vsi_stats_struct - Get System Network Statistics
* @vsi: the VSI we care about
*
@@ -2097,6 +2081,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
}
+ /* if the VF is not trusted do not do promisc */
+ if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ goto out;
+ }
+
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
@@ -2138,7 +2128,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
&vsi->back->hw,
vsi->seid,
- cur_promisc, NULL);
+ cur_promisc, NULL,
+ true);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
@@ -2865,34 +2856,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
memset(&rx_ctx, 0, sizeof(rx_ctx));
ring->rx_buf_len = vsi->rx_buf_len;
- ring->rx_hdr_len = vsi->rx_hdr_len;
rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
- rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
- set_ring_16byte_desc_enabled(ring);
- rx_ctx.dsize = 0;
- } else {
- rx_ctx.dsize = 1;
- }
+ /* use 32 byte descriptors */
+ rx_ctx.dsize = 1;
- rx_ctx.dtype = vsi->dtype;
- if (vsi->dtype) {
- set_ring_ps_enabled(ring);
- rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
- I40E_RX_SPLIT_IP |
- I40E_RX_SPLIT_TCP_UDP |
- I40E_RX_SPLIT_SCTP;
- } else {
- rx_ctx.hsplit_0 = 0;
- }
+ /* descriptor type is always zero
+ * rx_ctx.dtype = 0;
+ */
+ rx_ctx.hsplit_0 = 0;
- rx_ctx.rxmax = min_t(u16, vsi->max_frame,
- (chain_len * ring->rx_buf_len));
+ rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
if (hw->revision_id == 0)
rx_ctx.lrxqthresh = 0;
else
@@ -2929,12 +2907,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring_is_ps_enabled(ring)) {
- i40e_alloc_rx_headers(ring);
- i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
- } else {
- i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
- }
+ i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
return 0;
}
@@ -2973,40 +2946,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
else
vsi->max_frame = I40E_RXBUFFER_2048;
- /* figure out correct receive buffer length */
- switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
- I40E_FLAG_RX_PS_ENABLED)) {
- case I40E_FLAG_RX_1BUF_ENABLED:
- vsi->rx_hdr_len = 0;
- vsi->rx_buf_len = vsi->max_frame;
- vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
- break;
- case I40E_FLAG_RX_PS_ENABLED:
- vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
- vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
- break;
- default:
- vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
- vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
- break;
- }
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
#ifdef I40E_FCOE
/* setup rx buffer for FCoE */
if ((vsi->type == I40E_VSI_FCOE) &&
(vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
- vsi->rx_hdr_len = 0;
vsi->rx_buf_len = I40E_RXBUFFER_3072;
vsi->max_frame = I40E_RXBUFFER_3072;
- vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
}
#endif /* I40E_FCOE */
/* round up for the chip's needs */
- vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
- BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
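This alignment pairs with the dbuff programming in i40e_configure_rx_ring, where the length is stored as rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; rounding up first means the shift drops no bytes. A worked model, assuming a shift of 7 (128-byte units):

/* Model of the buffer-length rounding: align up to the granularity
 * implied by the shift, then store the length in shifted units.
 * A shift of 7 (128-byte units) is assumed for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define DBUFF_SHIFT 7
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	uint32_t buf_len = 3000;   /* not a multiple of 128 */

	buf_len = ALIGN_UP(buf_len, 1u << DBUFF_SHIFT);
	printf("aligned = %u, dbuff field = %u\n",
	       (unsigned)buf_len, (unsigned)(buf_len >> DBUFF_SHIFT));
	return 0;
}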
@@ -4164,7 +4115,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
}
@@ -5509,11 +5460,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
*
* Returns 0, this is not allowed to fail
**/
-#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev)
-#else
-static int i40e_close(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5538,8 +5485,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
WARN_ON(in_interrupt());
- if (i40e_check_asq_alive(&pf->hw))
- i40e_vc_notify_reset(pf);
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -6377,7 +6322,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event 0x%04x received\n",
+ "ARQ: Unknown event 0x%04x ignored\n",
opcode);
break;
}
@@ -6742,6 +6687,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
return;
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
@@ -6862,6 +6809,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
*/
ret = i40e_aq_set_phy_int_mask(&pf->hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -7525,10 +7473,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
rx_ring->count = vsi->num_desc;
rx_ring->size = 0;
rx_ring->dcb_tc = 0;
- if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
- set_ring_16byte_desc_enabled(rx_ring);
- else
- clear_ring_16byte_desc_enabled(rx_ring);
rx_ring->rx_itr_setting = pf->rx_itr_default;
vsi->rx_rings[i] = rx_ring;
}
@@ -8084,24 +8028,45 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
u8 i;
/* Fill out hash function seed */
if (seed) {
u32 *seed_dw = (u32 *)seed;
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
+ seed_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw,
+ I40E_VFQF_HKEY1(i, vf_id),
+ seed_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+ }
}
if (lut) {
u32 *lut_dw = (u32 *)lut;
- if (lut_size != I40E_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ if (vsi->type == I40E_VSI_MAIN) {
+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw,
+ I40E_VFQF_HLUT1(i, vf_id),
+ lut_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
}
i40e_flush(hw);
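The seed and LUT writes now branch on VSI type because the PF and VF register files take different LUT sizes, and a LUT of the wrong length for the type is rejected. A reduced model of that dispatch, with assumed sizes standing in for the real array lengths:

/* Reduced model of the per-VSI-type RSS LUT dispatch: PF and VF LUTs
 * have different sizes, so the length is validated per type. The
 * sizes here are assumed stand-ins for the driver's array sizes.
 */
#include <stdint.h>
#include <stdio.h>

enum vsi_type { VSI_MAIN, VSI_SRIOV };

#define PF_LUT_SIZE 512   /* assumed */
#define VF_LUT_SIZE 64    /* assumed */

static int config_rss_lut(enum vsi_type type, const uint8_t *lut,
			  uint16_t lut_size)
{
	uint16_t want = (type == VSI_MAIN) ? PF_LUT_SIZE : VF_LUT_SIZE;

	if (lut_size != want)
		return -22;    /* -EINVAL */
	/* ... write lut to the PF or VF register file here ... */
	(void)lut;
	return 0;
}

int main(void)
{
	uint8_t lut[VF_LUT_SIZE] = { 0 };

	printf("vf ok:  %d\n", config_rss_lut(VSI_SRIOV, lut, sizeof(lut)));
	printf("pf bad: %d\n", config_rss_lut(VSI_MAIN, lut, sizeof(lut)));
	return 0;
}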
@@ -8440,7 +8405,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
- pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -8451,14 +8415,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_LINK_POLLING_ENABLED |
I40E_FLAG_MSIX_ENABLED;
- if (iommu_present(&pci_bus_type))
- pf->flags |= I40E_FLAG_RX_PS_ENABLED;
- else
- pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
@@ -9074,6 +9032,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
+ .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
#if IS_ENABLED(CONFIG_VXLAN)
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -9114,40 +9073,44 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ netdev->hw_enc_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPIP |
+ NETIF_F_GSO_SIT |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
0;
- netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_HIGHDMA |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- 0;
+ if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= netdev->hw_enc_features |
+ NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->features |= NETIF_F_NTUPLE;
- if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= NETIF_F_NTUPLE;
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
+ netdev->hw_features |= netdev->hw_enc_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
@@ -9163,6 +9126,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
I40E_VLAN_ANY, false, true);
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
+ } else if ((pf->hw.aq.api_maj_ver > 1) ||
+ ((pf->hw.aq.api_maj_ver == 1) &&
+ (pf->hw.aq.api_min_ver > 4))) {
+ /* Supported in FW API version higher than 1.4 */
+ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -9180,12 +9149,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
- /* vlan gets same features (except vlan offload)
- * after any tweaks for specific VSI types
- */
- netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
/* Setup netdev TC information */
@@ -9398,7 +9362,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |=
- I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
+ I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
}
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
@@ -10444,6 +10409,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
**/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u16 flags = 0;
int ret;
/* find out what's out there already */
@@ -10457,6 +10423,32 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
}
i40e_pf_reset_stats(pf);
+ /* set the switch config bit for the whole device to
+ * support limited promisc or true promisc when the
+ * user requests promisc. The default is limited promisc.
+ */
+
+ if ((pf->hw.pf_id == 0) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+
+ if (pf->hw.pf_id == 0) {
+ u16 valid_flags;
+
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
/* first time setup */
if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
@@ -10684,11 +10676,9 @@ static void i40e_print_features(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
- i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
+ i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs,
- pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
-
+ pf->vsi[pf->lan_vsi]->num_queue_pairs);
if (pf->flags & I40E_FLAG_RSS_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " RSS");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
@@ -10827,6 +10817,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.func = PCI_FUNC(pdev->devfn);
pf->instance = pfs_found;
+ /* set up the locks for the AQ, do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
if (debug != -1) {
pf->msg_enable = pf->hw.debug_mask;
pf->msg_enable = debug;
@@ -10872,12 +10868,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up a default setting for link flow control */
pf->hw.fc.requested_mode = I40E_FC_NONE;
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&hw->aq.asq_mutex);
- mutex_init(&hw->aq.arq_mutex);
-
err = i40e_init_adminq(hw);
if (err) {
if (err == I40E_ERR_FIRMWARE_API_VERSION)
@@ -11069,6 +11059,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
err = i40e_aq_set_phy_int_mask(&pf->hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -11270,7 +11261,6 @@ err_init_lan_hmc:
kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
- (void)i40e_shutdown_adminq(hw);
err_pf_reset:
iounmap(hw->hw_addr);
err_ioremap:
@@ -11312,8 +11302,10 @@ static void i40e_remove(struct pci_dev *pdev)
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
+ if (pf->service_timer.data)
+ del_timer_sync(&pf->service_timer);
+ if (pf->service_task.func)
+ cancel_work_sync(&pf->service_task);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 5730f80..954efe3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -693,10 +693,10 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
- hw->aq.nvm_release_on_done,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -710,7 +710,18 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* going into the state machine
*/
if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
bytes[0] = hw->nvmupd_state;
+
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
return 0;
}
@@ -729,6 +740,14 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
case I40E_NVMUPD_STATE_INIT_WAIT:
case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ return 0;
+ }
+
status = I40E_ERR_NOT_READY;
*perrno = -EBUSY;
break;
@@ -799,7 +818,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -815,7 +835,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -828,10 +849,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (status)
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
}
break;
@@ -849,7 +872,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
-EIO;
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -940,8 +964,10 @@ retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (!status)
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
break;
case I40E_NVMUPD_WRITE_LCB:
@@ -953,7 +979,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -967,6 +994,7 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
@@ -980,7 +1008,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1030,6 +1059,37 @@ retry:
}
/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
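The helper is a small event-driven state machine: an adminq completion whose opcode matches the recorded wait opcode releases the NVM if armed and steps the update state back from its _WAIT variant. A stand-alone model, with 0x0703 assumed as the nvm_update opcode:

/* Stand-alone model of the wait-event state machine above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum upd_state { ST_INIT, ST_INIT_WAIT, ST_WRITING, ST_WRITE_WAIT };

struct hw {
	enum upd_state state;
	uint16_t wait_opcode;       /* 0 = not waiting */
	bool release_on_done;
};

static void check_wait_event(struct hw *hw, uint16_t opcode)
{
	if (opcode != hw->wait_opcode)
		return;             /* unrelated completion: ignore */

	if (hw->release_on_done) {
		/* i40e_release_nvm(hw) would run here */
		hw->release_on_done = false;
	}
	hw->wait_opcode = 0;

	if (hw->state == ST_INIT_WAIT)
		hw->state = ST_INIT;
	else if (hw->state == ST_WRITE_WAIT)
		hw->state = ST_WRITING;
}

int main(void)
{
	struct hw hw = { ST_WRITE_WAIT, 0x0703 /* assumed opcode */, false };

	check_wait_event(&hw, 0x0701);   /* wrong opcode: still waiting */
	printf("state=%d\n", hw.state);

	check_wait_event(&hw, 0x0703);   /* matching opcode: resume */
	printf("state=%d\n", hw.state);
	return 0;
}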
+
+/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1189,6 +1249,12 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index d51eee5..80403c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -130,9 +130,18 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -174,6 +183,10 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
struct i40e_aqc_get_switch_config_resp *buf,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
enum i40e_aq_resource_access_type access,
@@ -228,10 +241,6 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
@@ -308,6 +317,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 565ca7c..ed39cba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -158,9 +158,10 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now, then = ns_to_timespec64(delta);
+ struct timespec64 now, then;
unsigned long flags;
+ then = ns_to_timespec64(delta);
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_read(pf, &now);
@@ -288,9 +289,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->last_rx_ptp_check = jiffies;
pf->rx_hwtstamp_cleared++;
- dev_warn(&vsi->back->pdev->dev,
- "%s: clearing Rx timestamp hang\n",
- __func__);
+ WARN_ONCE(1, "Detected Rx timestamp register hang\n");
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6a49b7a..99a524d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,19 +636,21 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (i.e. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -678,7 +680,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buf->skb);
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -749,7 +751,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -767,7 +769,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -1022,7 +1024,6 @@ err:
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
unsigned long bi_size;
u16 i;
@@ -1030,48 +1031,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (ring_is_ps_enabled(rx_ring)) {
- int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
- rx_bi = &rx_ring->rx_bi[0];
- if (rx_bi->hdr_buf) {
- dma_free_coherent(dev,
- bufsz,
- rx_bi->hdr_buf,
- rx_bi->dma);
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = 0;
- rx_bi->hdr_buf = NULL;
- }
- }
- }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->dma) {
- dma_unmap_single(dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
- }
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
if (rx_bi->skb) {
dev_kfree_skb(rx_bi->skb);
rx_bi->skb = NULL;
}
- if (rx_bi->page) {
- if (rx_bi->page_dma) {
- dma_unmap_page(dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
- }
- __free_page(rx_bi->page);
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
- }
+ if (!rx_bi->page)
+ continue;
+
+ dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_bi->page, 0);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
}
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -1080,6 +1055,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -1104,37 +1080,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40e_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
- dma_addr_t dma;
- void *buffer;
- int buf_size;
- int i;
-
- if (rx_ring->rx_bi[0].hdr_buf)
- return;
- /* Make sure the buffers don't cross cache line boundaries. */
- buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
- buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
- &dma, GFP_KERNEL);
- if (!buffer)
- return;
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = dma + (i * buf_size);
- rx_bi->hdr_buf = buffer + (i * buf_size);
- }
-}
-
-/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1155,9 +1100,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
- ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
- : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1168,6 +1111,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
goto err;
}
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -1186,6 +1130,10 @@ err:
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -1196,160 +1144,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
*
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
**/
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
{
- u16 i = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
- const int current_node = numa_node_id();
+ struct page *page = bi->page;
+ dma_addr_t dma;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
- return false;
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
- if (bi->skb) /* desc is in use */
- goto no_buffers;
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /* If we've been moved to a different NUMA node, release the
- * page so we can get a new one on the current node.
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
*/
- if (bi->page && page_to_nid(bi->page) != current_node) {
- dma_unmap_page(rx_ring->dev,
- bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else if (bi->page) {
- rx_ring->rx_stats.page_reuse_count++;
- }
-
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- bi->page_offset = 0;
- goto no_buffers;
- }
- bi->page_offset = 0;
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
}
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
- return false;
+ return true;
+}
-no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
- * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40e_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
**/
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
- u16 i = rx_ring->next_to_use;
+ u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
- struct sk_buff *skb;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
- skb = bi->skb;
-
- if (!skb) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- bi->dma = 0;
- dev_kfree_skb(bi->skb);
- bi->skb = NULL;
- goto no_buffers;
- }
- }
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
- i++;
- if (i == rx_ring->count)
- i = 0;
- }
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -1358,41 +1268,35 @@ no_buffers:
}
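
The refill loop above advances three cursors in lockstep — the descriptor pointer, the buffer-info pointer, and the ntu index — and wraps all of them together at the end of the ring, instead of recomputing the descriptor address from the index on every pass. A standalone sketch of that walk (a plain array stands in for the descriptor ring; names are made up):

#include <stdio.h>

#define RING_COUNT 4

int main(void)
{
	int ring[RING_COUNT] = { 0 };
	int *desc = ring;		/* like rx_desc */
	unsigned int ntu = 0;		/* like next_to_use */
	unsigned int to_fill = 6;	/* deliberately wraps once */

	do {
		*desc = 1;		/* "program" the descriptor */
		desc++;
		ntu++;
		if (ntu == RING_COUNT) {	/* wrap all cursors together */
			desc = ring;
			ntu = 0;
		}
		to_fill--;
	} while (to_fill);

	printf("stopped at index %u\n", ntu);	/* 2 */
	return 0;
}
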
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
- u32 rx_status,
- u32 rx_error,
- u16 rx_ptype)
+ union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+ struct i40e_rx_ptype_decoded decoded;
+ bool ipv4, ipv6, tunnel = false;
+ u32 rx_error, rx_status;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
/* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
@@ -1438,14 +1342,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* doesn't make it a hard requirement so if we have validated the
* inner checksum report CHECKSUM_UNNECESSARY.
*/
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+ if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
+ I40E_RX_PTYPE_INNER_PROT_UDP |
+ I40E_RX_PTYPE_INNER_PROT_SCTP))
+ tunnel = true;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+ skb->csum_level = tunnel ? 1 : 0;
return;
@@ -1459,7 +1362,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -1487,11 +1390,11 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
u8 rx_ptype)
{
u32 hash;
- const __le64 rss_mask =
+ const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
@@ -1501,346 +1404,436 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- u16 i = rx_ring->next_to_clean;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u32 copysize;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- if (budget <= 0)
- return 0;
+ if (unlikely(rsyn)) {
+ i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
- do {
- struct i40e_rx_buffer *rx_bi;
- struct sk_buff *skb;
- u16 vlan_tag;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- i40e_alloc_rx_buffers_ps(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
- */
- dma_rmb();
- /* sync header buffer for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- if (i40e_rx_is_programming_status(qword)) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_INCREMENT(rx_ring, i);
- continue;
- }
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- if (likely(!skb)) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- failure = true;
- break;
- }
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+}
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- }
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
- I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- /* sync half-page for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->page_dma,
- rx_bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
- rx_bi->skb = NULL;
- cleaned_count++;
- copysize = 0;
- if (rx_hbo || rx_sph) {
- int len;
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the low-memory pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- if (rx_hbo)
- len = I40E_RX_HDR_SIZE;
- else
- len = rx_header_len;
- memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
- } else if (skb->len == 0) {
- int len;
- unsigned char *va = page_address(rx_bi->page) +
- rx_bi->page_offset;
-
- len = min(rx_packet_len, rx_ring->rx_hdr_len);
- memcpy(__skb_put(skb, len), va, len);
- copysize = len;
- rx_packet_len -= len;
- }
- /* Get the rest of the data if this was a header split */
- if (rx_packet_len) {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset + copysize,
- rx_packet_len, I40E_RXBUFFER_2048);
-
- /* If the page count is more than 2, then both halves
- * of the page are used and we need to free it. Do it
- * here instead of in the alloc code. Otherwise one
- * of the half-pages might be released between now and
- * then, and we wouldn't know which one to use.
- * Don't call get_page and free_page since those are
- * both expensive atomic operations that just change
- * the refcount in opposite directions. Just give the
- * page to the stack; he can have our refcount.
- */
- if (page_count(rx_bi->page) > 2) {
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page = NULL;
- rx_bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else {
- get_page(rx_bi->page);
- /* switch to the other half-page here; the
- * allocation code programs the right addr
- * into HW. If we haven't used this half-page,
- * the address won't be changed, and HW can
- * just use it next time through.
- */
- rx_bi->page_offset ^= PAGE_SIZE / 2;
- }
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes (when skb->len is less than 60) so that skb_pad can work.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- }
- I40E_RX_INCREMENT(rx_ring, i);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- struct i40e_rx_buffer *next_buffer;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
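
i40e_pull_tail copies only the packet headers (as sized by eth_get_headlen) out of the page fragment into the skb's linear area, then shrinks the frag from the front by adjusting its offset and length. Reduced to plain C, with an offset/length pair standing in for the skb frag:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char page[64] = "HDRHDRHDRpayload-payload-payload";
	char linear[16] = { 0 };
	unsigned int page_offset = 0;
	unsigned int frag_len = 32;
	unsigned int pull_len = 9;	/* what eth_get_headlen would report */

	memcpy(linear, page + page_offset, pull_len);	/* header to linear */
	page_offset += pull_len;	/* like frag->page_offset += pull_len */
	frag_len -= pull_len;		/* like skb_frag_size_sub() */

	printf("linear '%s', frag '%s' (%u bytes)\n",
	       linear, page + page_offset, frag_len);
	return 0;
}
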
- next_buffer = &rx_ring->rx_bi[i];
- next_buffer->skb = skb;
- rx_ring->rx_stats.non_eop_descs++;
- continue;
- }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ i40e_pull_tail(rx_ring, skb);
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ return false;
+}
- if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
- i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
- I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
- rx_ring->last_rx_timestamp = jiffies;
- }
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
+ new_buff = &rx_ring->rx_bi[nta];
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* transfer page from old buffer to new buffer */
+ *new_buff = *old_buff;
+}
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
-#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
- i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- } while (likely(total_rx_packets < budget));
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!i40e_page_is_reserved(page)))
+ return true;
- return failure ? budget : total_rx_packets;
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(i40e_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ get_page(rx_buffer->page);
+
+ return true;
}
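
On 4K-page systems, the buffer-reuse scheme above hands the two 2K halves of a page to hardware alternately: page_offset ^= truesize flips to the other half, and the page is recycled only while the driver holds the sole remaining reference. The flip in isolation:

#include <stdio.h>

#define TRUESIZE 2048u	/* half of a 4K page, as with I40E_RXBUFFER_2048 */

int main(void)
{
	unsigned int page_offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("give hardware the buffer at offset %u\n", page_offset);
		page_offset ^= TRUESIZE;	/* flip to the other half */
	}
	return 0;
}
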
/**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
*
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ } else {
+ rx_buffer->skb = NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+#define staterrlen rx_desc->wb.qword1.status_error_len
+ if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ rx_ring->rx_bi[ntc].skb = skb;
+ return true;
+ }
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_bi[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access, this avoids that cost by keeping
+ * the page mapped and recycling it rather than remapping on every packet.
+ *
+ * Returns amount of work completed
**/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- u16 rx_packet_len;
bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u16 i;
- do {
- struct i40e_rx_buffer *rx_bi;
+ while (likely(total_rx_packets < budget)) {
+ union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
+ u32 rx_status;
u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- i40e_alloc_rx_buffers_1buf(rx_ring,
- cleaned_count);
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup, and overlaps with hdr_addr
+ * which is always zero because packet split isn't used. If the
+ * hardware wrote DD then it will be non-zero
+ */
+ if (!rx_desc->wb.qword1.status_error_len)
+ break;
+
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* DD bit is set.
*/
dma_rmb();
- if (i40e_rx_is_programming_status(qword)) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_INCREMENT(rx_ring, i);
- continue;
- }
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- prefetch(skb->data);
-
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+ if (!skb)
+ break;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_bi->skb = NULL;
cleaned_count++;
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- skb_put(skb, rx_packet_len);
- dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
-
- I40E_RX_INCREMENT(rx_ring, i);
-
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- rx_ring->rx_stats.non_eop_descs++;
+ if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
continue;
}
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
- i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
- I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
- rx_ring->last_rx_timestamp = jiffies;
- }
+ if (i40e_cleanup_headers(rx_ring, skb))
+ continue;
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
-
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* populate checksum, VLAN, and protocol */
+ i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+ if (unlikely(
+ i40e_rx_is_fcoe(rx_ptype) &&
+ !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
dev_kfree_skb_any(skb);
continue;
}
#endif
+
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
- } while (likely(total_rx_packets < budget));
+ /* update budget accounting */
+ total_rx_packets++;
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1849,6 +1842,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets;
}
@@ -1975,9 +1969,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete = clean_complete &&
- i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
@@ -1991,16 +1987,12 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
-
- if (ring_is_ps_enabled(ring))
- cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
- else
- cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete = clean_complete && (budget_per_ring > cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
/* If work not completed, return budget and polling will return */
@@ -2247,15 +2239,13 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
u64 cd_cmd, cd_tso_len, cd_mss;
union {
@@ -2292,16 +2282,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
ip.v6->payload_len = 0;
}
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPIP |
+ SKB_GSO_SIT |
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from outer checksum */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
@@ -2321,9 +2317,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
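
Both checksum hunks above replace open-coded ones-complement arithmetic with csum_replace_by_diff(), which subtracts a value from an existing folded checksum instead of recomputing it from scratch. The arithmetic behind that, as a self-contained userspace program (fold() is a stand-in for the kernel's csum_fold, not a real API):

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit ones-complement sum down to 16 bits */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t pseudo = 0x1234 + 0xabcd;	/* pseudo-header minus length */
	uint32_t paylen = 1400;

	/* checksum as the stack computed it, payload length included */
	uint16_t check = (uint16_t)~fold(pseudo + paylen);

	/* remove paylen: in ones-complement, adding ~x subtracts x */
	uint32_t sum = (uint16_t)~check;	/* back to the folded sum */
	sum += (uint16_t)~fold(paylen);
	uint16_t adjusted = (uint16_t)~fold(sum);

	printf("recomputed %#06x, adjusted %#06x\n",
	       (uint16_t)~fold(pseudo), adjusted);	/* both 0x41fe */
	return 0;
}
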
@@ -2405,7 +2400,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *hdr;
} l4;
unsigned char *exthdr;
- u32 offset, cmd = 0, tunnel = 0;
+ u32 offset, cmd = 0;
__be16 frag_off;
u8 l4_proto = 0;
@@ -2419,6 +2414,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
+ u32 tunnel = 0;
/* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -2436,13 +2432,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
@@ -2453,6 +2442,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
return -1;
@@ -2461,12 +2455,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
return 0;
}
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
@@ -2716,6 +2718,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -2723,12 +2727,14 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
+ /* align size to end of page */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
- I40E_MAX_DATA_PER_TXD, td_tag);
+ max_data, td_tag);
tx_desc++;
i++;
@@ -2739,9 +2745,10 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- dma += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
+ dma += max_data;
+ size -= max_data;
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
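
The added max_data logic sizes the first chunk of an oversized buffer so that every subsequent chunk starts on a 4K read-request boundary; -dma & (I40E_MAX_READ_REQ_SIZE - 1) is the distance from dma up to the next 4K multiple. A sketch under those constants (addresses are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define READ_REQ    4096u		/* I40E_MAX_READ_REQ_SIZE */
#define MAX_ALIGNED (12u * 1024)	/* I40E_MAX_DATA_PER_TXD_ALIGNED */

int main(void)
{
	uint64_t dma = 0x100a30;	/* arbitrary, unaligned start */
	uint32_t size = 40000;
	uint32_t max_data = MAX_ALIGNED + (uint32_t)(-dma & (READ_REQ - 1));

	while (size > max_data) {
		printf("chunk at %#10llx, len %5u\n",
		       (unsigned long long)dma, max_data);
		dma += max_data;	/* now 4K-aligned */
		size -= max_data;
		max_data = MAX_ALIGNED;	/* later chunks stay aligned */
	}
	printf("tail  at %#10llx, len %5u\n", (unsigned long long)dma, size);
	return 0;
}
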
@@ -2891,7 +2898,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2922,7 +2929,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index a9bd705..b78c810 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512 512 /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
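
i40e_test_staterr avoids byte-swapping the hot descriptor word on big-endian hosts by converting the constant mask once with cpu_to_le64() instead. A userspace analogue, assuming a glibc/Linux host for htole64():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DD_BIT (1ull << 0)	/* stands in for the DD status bit */

int main(void)
{
	/* descriptor word as the device wrote it: little-endian */
	uint64_t qword_le = htole64(DD_BIT | (1234ull << 38));

	/* swap the constant, not the hot-path data word */
	printf("DD set: %d\n", !!(qword_le & htole64(DD_BIT)));
	return 0;
}
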
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
prefetch((n)); \
} while (0)
-#define i40e_rx_desc i40e_32byte_rx_desc
-
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer; however,
+ * the error should not impact things much, as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+ unsigned int adjust = ~(u32)0;
+
+ /* if we rounded up on the reciprocal pull down the adjustment */
+ if ((max * reciprocal) > adjust)
+ adjust = ~(u32)(reciprocal - 1);
+
+ return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
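
The reciprocal trick in i40e_txd_use_count turns the division by 12K (not a power of two) into a 32x32->64 multiply and a shift. A standalone copy of the math, checked against a plain DIV_ROUND_UP for every size up to 256 KiB — far beyond any single Tx buffer:

#include <stdint.h>
#include <stdio.h>

#define MAXD (12u * 1024)	/* I40E_MAX_DATA_PER_TXD_ALIGNED */

static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int reciprocal =
		((1ull << 32) - 1 + (MAXD / 2)) / MAXD;
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal, pull down the adjustment */
	if ((MAXD * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size;

	for (size = 1; size <= 256u * 1024; size++)
		if (txd_use_count(size) != (size + MAXD - 1) / MAXD) {
			printf("mismatch at %u\n", size);
			return 1;
		}
	puts("matches DIV_ROUND_UP(size, 12K) for all tested sizes");
	return 0;
}
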
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
@@ -184,10 +230,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
- void *hdr_buf;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -216,22 +260,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_RX_PS_ENABLED,
- __I40E_RX_16BYTE_DESC_ENABLED,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
- test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
- set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
- clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for the virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
@@ -258,16 +298,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
@@ -301,6 +332,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -324,9 +356,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -377,7 +407,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += TXD_USE_COUNT(size);
+ count += i40e_txd_use_count(size);
if (!nr_frags--)
break;
@@ -423,4 +453,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+ return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+ (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 3335f9d..bd5f13b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,7 +36,7 @@
#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
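
The I40E_MASK change matters because the uncast version shifts in plain int: a mask shifted to bit 31 becomes a negative int (formally undefined when a bit lands in the sign position), which then sign-extends if the mask is used in a 64-bit expression. A small demonstration, with macro names local to the example:

#include <stdint.h>
#include <stdio.h>

#define MASK_OLD(mask, shift) ((mask) << (shift))
#define MASK_NEW(mask, shift) ((uint32_t)(mask) << (shift))

int main(void)
{
	/* a one-bit field at bit position 31, used in a 64-bit expression */
	uint64_t old_mask = MASK_OLD(0x1, 31);	/* typically sign-extends */
	uint64_t new_mask = MASK_NEW(0x1, 31);	/* 0x0000000080000000 */

	printf("old: %#018llx\nnew: %#018llx\n",
	       (unsigned long long)old_mask, (unsigned long long)new_mask);
	return 0;
}
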
@@ -275,6 +275,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -549,6 +554,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1533,4 +1540,37 @@ struct i40e_lldp_variables {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index ab866cf..c92a3bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -80,10 +80,15 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -157,6 +162,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -165,8 +171,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -325,6 +331,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 816c6bb..1fcafcf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -48,7 +48,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
int i;
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
@@ -63,7 +63,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
}
/**
- * i40e_vc_notify_link_state
+ * i40e_vc_notify_vf_link_state
* @vf: pointer to the VF structure
*
* send a link status message to a single VF
@@ -74,7 +74,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -141,7 +141,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
return;
- abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+ abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
@@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
}
rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
- /* set splitalways mode 10b */
+ /* set split mode 10b */
rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
}
@@ -665,8 +665,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
- u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
/* If the port VLAN has been configured and then the
@@ -688,12 +686,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
- f = i40e_add_filter(vsi, brdcast,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
- if (!f)
- dev_info(&pf->pdev->dev,
- "Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
@@ -860,7 +852,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
if (ret)
goto error_alloc;
total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
- set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ if (vf->trusted)
+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
/* store the total qps number for the runtime
* VF req validation
@@ -917,9 +913,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
bool rsd = false;
int i;
- u32 reg;
if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
return;
@@ -937,6 +933,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
i40e_flush(hw);
}
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ i40e_flush(hw);
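
GLGEN_VFLRSTAT is an array of 32-bit registers carrying one bit per absolute VF, so the index math above is a plain divide/modulo split. A worked example, assuming a vf_base_id of 64:

	/* abs VF id = 64 + 5 = 69
	 * reg_idx = 69 / 32 = 2
	 * bit_idx = 69 % 32 = 5
	 * -> writing BIT(5) to I40E_GLGEN_VFLRSTAT(2) clears this VF's bit
	 */
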
if (i40e_quiesce_vf_pci(vf))
dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
@@ -988,6 +989,7 @@ complete_reset:
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, &pf->state);
}
@@ -1227,8 +1229,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
/* single place to detect unsuccessful return values */
if (v_retval) {
vf->num_invalid_msgs++;
- dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n",
- vf->vf_id, v_opcode, v_retval);
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
if (vf->num_invalid_msgs >
I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
dev_err(&pf->pdev->dev,
@@ -1246,9 +1248,9 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "Unable to send the message to VF %d aq_err %d\n",
- vf->vf_id, pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
return -EIO;
}
@@ -1306,8 +1308,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- int i = 0, len = 0;
int num_vsis = 1;
+ int len = 0;
int ret;
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -1342,12 +1344,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
}
- if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+ (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
@@ -1356,8 +1362,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
+ vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ goto err;
+ }
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ }
if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
@@ -1368,16 +1382,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+
if (vf->lan_vsi_idx) {
- vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
- vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
- vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+ vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
/* VFs only use TC 0 */
- vfres->vsi_res[i].qset_handle
+ vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
- ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
- i++;
}
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1407,6 +1423,25 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
}
/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ int num_vlans = 0;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+ num_vlans++;
+ }
+
+ return num_vlans;
+}
+
+/**
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1422,22 +1457,128 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
(struct i40e_virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct i40e_vsi *vsi;
+ struct i40e_mac_filter *f;
+ i40e_status aq_ret = 0;
bool allmulti = false;
- i40e_status aq_ret;
+ struct i40e_vsi *vsi;
+ bool alluni = false;
+ int aq_err = 0;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
- (vsi->type != I40E_VSI_FCOE)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Lie to the VF on purpose. */
+ aq_ret = 0;
+ goto error_param;
+ }
+ /* Multicast promiscuous handling */
if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
- allmulti, NULL);
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+ allmulti,
+ vf->port_vlan_id,
+ NULL);
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+ vsi->seid,
+ allmulti,
+ f->vlan,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ break;
+ }
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ allmulti, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ goto error_param_int;
+ }
+ }
+
+ if (!aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ if (allmulti)
+ set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ else
+ clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ }
+
+ if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+ alluni,
+ vf->port_vlan_id,
+ NULL);
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ aq_ret = 0;
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
+ aq_ret =
+ i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+ vsi->seid,
+ alluni,
+ f->vlan,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ }
+ if (aq_ret)
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ alluni, NULL,
+ true);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret)
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
+ vf->vf_id, info->flags,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+error_param_int:
+ if (!aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ if (alluni)
+ set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ else
+ clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ }
error_param:
/* send the response to the VF */
@@ -1688,6 +1829,10 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 8
+
/**
* i40e_check_vf_permission
* @vf: pointer to the VF info
@@ -1708,15 +1853,22 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
ret = I40E_ERR_INVALID_MAC_ADDR;
} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
!ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
/* If the host VMM administrator has set the VF MAC address
* administratively via the ndo_set_vf_mac command then deny
* permission to the VF to add or delete unicast MAC addresses.
+ * Unless the VF is privileged and then it can do whatever.
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
dev_err(&pf->pdev->dev,
- "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more functionality\n");
ret = -EPERM;
}
return ret;
@@ -1741,7 +1893,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1780,6 +1931,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
+ } else {
+ vf->num_mac++;
}
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1815,7 +1968,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1839,6 +1991,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
+ } else {
+ vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1873,8 +2027,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int i;
+ if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+ goto error_param;
+ }
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1898,6 +2057,19 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
/* add new VLAN filter */
int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+ if (!ret)
+ vf->num_vlan++;
+
+ if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
if (ret)
dev_err(&pf->pdev->dev,
@@ -1929,7 +2101,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1950,6 +2121,19 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ if (!ret)
+ vf->num_vlan--;
+
+ if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
if (ret)
dev_err(&pf->pdev->dev,
@@ -2029,6 +2213,135 @@ error_param:
}
/**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_key *vrk =
+ (struct i40e_virtchnl_rss_key *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vrk->vsi_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ aq_ret);
+}
+
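
For reference, the VF side builds the matching request with the same sizing rule. A hypothetical sketch (send_msg_to_pf() is invented here to stand in for the real admin-queue send path in the i40evf driver):

	/* Hypothetical VF-side construction of the CONFIG_RSS_KEY message. */
	static int vf_send_rss_key(u16 vsi_id, const u8 *rss_key)
	{
		int len = sizeof(struct i40e_virtchnl_rss_key) +
			  I40E_HKEY_ARRAY_SIZE - 1;
		struct i40e_virtchnl_rss_key *vrk = kzalloc(len, GFP_KERNEL);

		if (!vrk)
			return -ENOMEM;
		vrk->vsi_id = vsi_id;		/* from the VF resource reply */
		vrk->key_len = I40E_HKEY_ARRAY_SIZE;
		memcpy(vrk->key, rss_key, I40E_HKEY_ARRAY_SIZE);
		send_msg_to_pf(I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
		kfree(vrk);
		return 0;
	}
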
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_lut *vrl =
+ (struct i40e_virtchnl_rss_lut *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vrl->vsi_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+err:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_hena *vrh = NULL;
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+ int len = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_hena);
+
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+ /* send the response back to the VF */
+ aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret, (u8 *)vrh, len);
+ kfree(vrh);
+ return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_hena *vrh =
+ (struct i40e_virtchnl_rss_hena *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(vrh->hena >> 32));
+
+err:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ aq_ret);
+}
+
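
Unlike the key/LUT opcodes there is no VSI id to validate here: the hash-enable bits live in per-VF registers indexed directly by vf_id. The two writes above are just the 64-bit mask split into 32-bit halves:

	u32 lo = (u32)vrh->hena;		/* bits 31:0  -> I40E_VFQF_HENA1(0, vf_id) */
	u32 hi = (u32)(vrh->hena >> 32);	/* bits 63:32 -> I40E_VFQF_HENA1(1, vf_id) */
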
+/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2041,7 +2354,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len;
+ int valid_len = 0;
/* Check if VF is disabled. */
if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
@@ -2053,13 +2366,10 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- valid_len = 0;
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(vf))
valid_len = sizeof(u32);
- else
- valid_len = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
@@ -2149,6 +2459,35 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
sizeof(struct i40e_virtchnl_iwarp_qv_info));
}
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct i40e_virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_rss_key *vrk =
+ (struct i40e_virtchnl_rss_key *)msg;
+ if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct i40e_virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_rss_lut *vrl =
+ (struct i40e_virtchnl_rss_lut *)msg;
+ if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct i40e_virtchnl_rss_hena);
+ break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
@@ -2175,11 +2514,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
* called from the common aeq/arq handler to
* process request from VF
**/
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
- unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
struct i40e_vf *vf;
int ret;
@@ -2247,6 +2586,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ret = i40e_vc_config_rss_key(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+ break;
+
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2268,9 +2620,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
**/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
- u32 reg, reg_idx, bit_idx, vf_id;
struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
struct i40e_vf *vf;
+ int vf_id;
if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
return 0;
@@ -2292,13 +2645,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx)) {
- /* clear the bit in GLGEN_VFLRSTAT */
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-
- if (!test_bit(__I40E_DOWN, &pf->state))
- i40e_reset_vf(vf, true);
- }
+ if (reg & BIT(bit_idx))
+ /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+ i40e_reset_vf(vf, true);
}
return 0;
@@ -2762,3 +3111,45 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
out:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_trust
+ * @netdev: network interface device structure of the pf
+ * @vf_id: VF identifier
+ * @setting: trust setting
+ *
+ * Enable or disable VF trust setting
+ **/
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ if (!vf)
+ return -EINVAL;
+ if (setting == vf->trusted)
+ goto out;
+
+ vf->trusted = setting;
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+ dev_info(&pf->pdev->dev, "VF %d is now %strusted\n",
+ vf_id, setting ? "" : "un");
+out:
+ return ret;
+}
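
Since this wires up the generic ndo_set_vf_trust hook, the host administrator drives it with iproute2, e.g. (the PF netdev name is a placeholder):

	ip link set <pf-netdev> vf 0 trust on

Because the handler resets the VF, the I40E_VIRTCHNL_VF_CAP_PRIVILEGE bit computed in i40e_alloc_vf_res() is recomputed the next time the VF driver negotiates resources.
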
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index e7b2fba..8751741 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -61,6 +61,8 @@ enum i40e_vf_states {
I40E_VF_STAT_IWARPENA,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
+ I40E_VF_STAT_MC_PROMISC,
+ I40E_VF_STAT_UC_PROMISC,
};
/* VF capabilities */
@@ -75,7 +77,7 @@ struct i40e_vf {
struct i40e_pf *pf;
/* VF id in the PF space */
- u16 vf_id;
+ s16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
struct i40e_virtchnl_version_info vf_ver;
@@ -88,6 +90,7 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
+ bool trusted;
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -108,6 +111,9 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+
/* RDMA Client */
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
};
@@ -115,7 +121,7 @@ struct i40e_vf {
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
@@ -127,6 +133,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a3eae5d..1f9b3b5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index aad8d62..3114dcf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
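
These conversions lean on the generic helper from <linux/bitops.h>, which is essentially:

	#define BIT(nr)		(1UL << (nr))

making the shift unsigned by construction; 64-bit masks (such as the RSS hena above) want BIT_ULL() instead.
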
/* error codes */
enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -426,6 +422,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -1582,27 +1579,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1649,11 +1625,11 @@ enum i40e_aq_phy_type {
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
};
struct i40e_aqc_module_desc {
@@ -1924,9 +1900,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2195,7 +2171,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2215,14 +2191,14 @@ struct i40e_aqc_get_set_rss_key_data {
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 771ac6a..8f64204 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -58,6 +58,8 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index ca8b58c..d34972b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -44,6 +44,8 @@
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index cea97da..fd7dae46 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_kfree_skb_any(tx_buf->skb);
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -267,7 +269,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -285,7 +287,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -494,7 +496,6 @@ err:
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
unsigned long bi_size;
u16 i;
@@ -502,48 +503,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (ring_is_ps_enabled(rx_ring)) {
- int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
- rx_bi = &rx_ring->rx_bi[0];
- if (rx_bi->hdr_buf) {
- dma_free_coherent(dev,
- bufsz,
- rx_bi->hdr_buf,
- rx_bi->dma);
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = 0;
- rx_bi->hdr_buf = NULL;
- }
- }
- }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->dma) {
- dma_unmap_single(dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
- }
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
if (rx_bi->skb) {
dev_kfree_skb(rx_bi->skb);
rx_bi->skb = NULL;
}
- if (rx_bi->page) {
- if (rx_bi->page_dma) {
- dma_unmap_page(dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
- }
- __free_page(rx_bi->page);
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
- }
+ if (!rx_bi->page)
+ continue;
+
+ dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_bi->page, 0);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
}
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -552,6 +527,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -576,37 +552,6 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40evf_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
- dma_addr_t dma;
- void *buffer;
- int buf_size;
- int i;
-
- if (rx_ring->rx_bi[0].hdr_buf)
- return;
- /* Make sure the buffers don't cross cache line boundaries. */
- buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
- buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
- &dma, GFP_KERNEL);
- if (!buffer)
- return;
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = dma + (i * buf_size);
- rx_bi->hdr_buf = buffer + (i * buf_size);
- }
-}
-
-/**
* i40evf_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -627,9 +572,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
- ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
- : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -640,6 +583,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
goto err;
}
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -658,6 +602,10 @@ err:
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -668,160 +616,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
*
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
**/
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
{
- u16 i = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
- const int current_node = numa_node_id();
+ struct page *page = bi->page;
+ dma_addr_t dma;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
+
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
return false;
+ }
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (bi->skb) /* desc is in use */
- goto no_buffers;
-
- /* If we've been moved to a different NUMA node, release the
- * page so we can get a new one on the current node.
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
*/
- if (bi->page && page_to_nid(bi->page) != current_node) {
- dma_unmap_page(rx_ring->dev,
- bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else if (bi->page) {
- rx_ring->rx_stats.page_reuse_count++;
- }
-
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- bi->page_offset = 0;
- goto no_buffers;
- }
- bi->page_offset = 0;
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
}
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
- return false;
+ return true;
+}
-no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
- * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40evf_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
**/
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
- u16 i = rx_ring->next_to_use;
+ u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
- struct sk_buff *skb;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
- skb = bi->skb;
-
- if (!skb) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- bi->dma = 0;
- dev_kfree_skb(bi->skb);
- bi->skb = NULL;
- goto no_buffers;
- }
- }
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
- i++;
- if (i == rx_ring->count)
- i = 0;
- }
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -830,41 +740,35 @@ no_buffers:
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
- u32 rx_status,
- u32 rx_error,
- u16 rx_ptype)
+ union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+ struct i40e_rx_ptype_decoded decoded;
+ bool ipv4, ipv6, tunnel = false;
+ u32 rx_error, rx_status;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
/* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
@@ -910,14 +814,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* doesn't make it a hard requirement so if we have validated the
* inner checksum report CHECKSUM_UNNECESSARY.
*/
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+ if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
+ I40E_RX_PTYPE_INNER_PROT_UDP |
+ I40E_RX_PTYPE_INNER_PROT_SCTP))
+ tunnel = true;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+ skb->csum_level = tunnel ? 1 : 0;
return;
@@ -931,7 +834,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -959,7 +862,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
u8 rx_ptype)
{
u32 hash;
- const __le64 rss_mask =
+ const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
@@ -973,313 +876,411 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- u16 i = rx_ring->next_to_clean;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u32 copysize;
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- do {
- struct i40e_rx_buffer *rx_bi;
- struct sk_buff *skb;
- u16 vlan_tag;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- i40evf_alloc_rx_buffers_ps(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+}
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
- */
- dma_rmb();
- /* sync header buffer for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- if (likely(!skb)) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- failure = true;
- break;
- }
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- }
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
- I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- /* sync half-page for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->page_dma,
- rx_bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
- rx_bi->skb = NULL;
- cleaned_count++;
- copysize = 0;
- if (rx_hbo || rx_sph) {
- int len;
-
- if (rx_hbo)
- len = I40E_RX_HDR_SIZE;
- else
- len = rx_header_len;
- memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
- } else if (skb->len == 0) {
- int len;
- unsigned char *va = page_address(rx_bi->page) +
- rx_bi->page_offset;
-
- len = min(rx_packet_len, rx_ring->rx_hdr_len);
- memcpy(__skb_put(skb, len), va, len);
- copysize = len;
- rx_packet_len -= len;
- }
- /* Get the rest of the data if this was a header split */
- if (rx_packet_len) {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset + copysize,
- rx_packet_len, I40E_RXBUFFER_2048);
-
- /* If the page count is more than 2, then both halves
- * of the page are used and we need to free it. Do it
- * here instead of in the alloc code. Otherwise one
- * of the half-pages might be released between now and
- * then, and we wouldn't know which one to use.
- * Don't call get_page and free_page since those are
- * both expensive atomic operations that just change
- * the refcount in opposite directions. Just give the
- * page to the stack; he can have our refcount.
- */
- if (page_count(rx_bi->page) > 2) {
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page = NULL;
- rx_bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else {
- get_page(rx_bi->page);
- /* switch to the other half-page here; the
- * allocation code programs the right addr
- * into HW. If we haven't used this half-page,
- * the address won't be changed, and HW can
- * just use it next time through.
- */
- rx_bi->page_offset ^= PAGE_SIZE / 2;
- }
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- }
- I40E_RX_INCREMENT(rx_ring, i);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- struct i40e_rx_buffer *next_buffer;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
- next_buffer = &rx_ring->rx_bi[i];
- next_buffer->skb = skb;
- rx_ring->rx_stats.non_eop_descs++;
- continue;
- }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ i40e_pull_tail(rx_ring, skb);
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ return false;
+}
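eth_skb_pad() grows any frame shorter than ETH_ZLEN (60 bytes, FCS not included) with zeros so it is a valid Ethernet frame on the wire, freeing the skb and returning nonzero on failure. A hedged sketch of the padding rule itself, outside the skb machinery:

	#include <stddef.h>
	#include <string.h>

	#define ETH_ZLEN 60	/* minimum Ethernet frame length, no FCS */

	/* sketch: zero-fill a frame out to the 60-byte minimum; buf must
	 * have room for ETH_ZLEN bytes; returns the padded length
	 */
	static size_t pad_eth_frame(unsigned char *buf, size_t len)
	{
		if (len < ETH_ZLEN) {
			memset(buf + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		return len;
	}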
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ new_buff = &rx_ring->rx_bi[nta];
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
-#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* transfer page from old buffer to new buffer */
+ *new_buff = *old_buff;
+}
+
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
- i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- } while (likely(total_rx_packets < budget));
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!i40e_page_is_reserved(page)))
+ return true;
- return failure ? budget : total_rx_packets;
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(i40e_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Even if we own the page, we are not allowed to use atomic_set().
+ * This would break get_page_unless_zero() users.
+ */
+ get_page(rx_buffer->page);
+
+ return true;
}
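On 4K-page systems the page_offset XOR is the whole recycling trick: each flip hands the other 2K half of the page to hardware while the stack holds a reference on the half just used. A small illustration of the flip, assuming the I40E_RXBUFFER_2048 half-page size:

	#include <stdio.h>

	#define BUF_SIZE 2048	/* I40E_RXBUFFER_2048, half of a 4K page */

	int main(void)
	{
		unsigned int page_offset = 0;
		int i;

		/* each XOR toggles between the two halves of the page */
		for (i = 0; i < 4; i++) {
			printf("%u\n", page_offset);	/* 0, 2048, 0, 2048 */
			page_offset ^= BUF_SIZE;
		}
		return 0;
	}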
/**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
*
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ } else {
+ rx_buffer->skb = NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
**/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_bi[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- u16 rx_packet_len;
bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u16 i;
- do {
- struct i40e_rx_buffer *rx_bi;
+ while (likely(total_rx_packets < budget)) {
+ union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
+ u32 rx_status;
u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- i40evf_alloc_rx_buffers_1buf(rx_ring,
- cleaned_count);
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup, and overlaps with hdr_addr
+ * which is always zero because packet split isn't used; if the
+ * hardware wrote DD then it will be non-zero
+ */
+ if (!rx_desc->wb.qword1.status_error_len)
+ break;
+
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* DD bit is set.
*/
dma_rmb();
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- prefetch(skb->data);
-
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+ if (!skb)
+ break;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_bi->skb = NULL;
cleaned_count++;
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- skb_put(skb, rx_packet_len);
- dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
-
- I40E_RX_INCREMENT(rx_ring, i);
-
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- rx_ring->rx_stats.non_eop_descs++;
+ if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
continue;
}
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ if (i40e_cleanup_headers(rx_ring, skb))
+ continue;
+
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* populate checksum, VLAN, and protocol */
+ i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
- } while (likely(total_rx_packets < budget));
+ /* update budget accounting */
+ total_rx_packets++;
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1288,6 +1289,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets;
}
@@ -1411,9 +1413,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete = clean_complete &&
- i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
@@ -1427,16 +1431,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
-
- if (ring_is_ps_enabled(ring))
- cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
- else
- cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete = clean_complete && (budget_per_ring > cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
/* If work not completed, return budget and polling will return */
@@ -1514,15 +1514,13 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
u64 cd_cmd, cd_tso_len, cd_mss;
union {
@@ -1559,16 +1557,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
ip.v6->payload_len = 0;
}
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPIP |
+ SKB_GSO_SIT |
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from outer checksum */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
@@ -1588,9 +1592,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
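csum_replace_by_diff() backs the payload length out of the stack's ones'-complement pseudo-header checksum, which for TSO must cover everything except the length. A userspace sketch of the underlying arithmetic (csum_fold32 is an assumed helper, not the kernel's implementation):

	#include <stdint.h>
	#include <stdio.h>

	/* fold a 32-bit accumulator to 16 bits, ones'-complement style */
	static uint16_t csum_fold32(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		uint32_t pseudo = 0x1234;	/* pseudo-header sum, length excluded */
		uint16_t paylen = 1400;
		uint16_t with_len = ~csum_fold32(pseudo + paylen);
		uint32_t sum;

		/* ones'-complement subtraction is addition of the complement,
		 * which is in essence what csum_replace_by_diff() applies
		 */
		sum = (uint16_t)~with_len;	/* undo the final inversion */
		sum += (uint16_t)~paylen;

		/* both print edcb: the length has been removed again */
		printf("%04x == %04x\n", ~csum_fold32(sum) & 0xffff,
		       ~csum_fold32(pseudo) & 0xffff);
		return 0;
	}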
@@ -1630,7 +1633,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *hdr;
} l4;
unsigned char *exthdr;
- u32 offset, cmd = 0, tunnel = 0;
+ u32 offset, cmd = 0;
__be16 frag_off;
u8 l4_proto = 0;
@@ -1644,6 +1647,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
+ u32 tunnel = 0;
/* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -1661,13 +1665,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
@@ -1678,6 +1675,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
return -1;
@@ -1686,12 +1688,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
return 0;
}
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
@@ -1935,6 +1945,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -1942,12 +1954,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
+ /* align size to end of page */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
- I40E_MAX_DATA_PER_TXD, td_tag);
+ max_data, td_tag);
tx_desc++;
i++;
@@ -1958,9 +1972,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- dma += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
+ dma += max_data;
+ size -= max_data;
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
@@ -2109,7 +2124,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2140,7 +2155,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0429553..0112277 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512 512 /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
prefetch((n)); \
} while (0)
-#define i40e_rx_desc i40e_32byte_rx_desc
-
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
+ * the error should not impact things much as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+ unsigned int adjust = ~(u32)0;
+
+ /* if we rounded up on the reciprocal pull down the adjustment */
+ if ((max * reciprocal) > adjust)
+ adjust = ~(u32)(reciprocal - 1);
+
+ return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
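Because the reciprocal is rounded to nearest and the adjustment constant absorbs the truncation error, the multiply-and-shift above agrees with DIV_ROUND_UP for every size a Tx buffer can take. A quick userspace check of that claim (MAX_ALIGNED mirrors I40E_MAX_DATA_PER_TXD_ALIGNED, i.e. 12288):

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_ALIGNED 12288

	static unsigned int div_round_up(unsigned int size)
	{
		return (size + MAX_ALIGNED - 1) / MAX_ALIGNED;	/* reference */
	}

	/* the reciprocal-multiply version from above, in userspace types */
	static unsigned int txd_use_count(unsigned int size)
	{
		const unsigned int max = MAX_ALIGNED;
		const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
		unsigned int adjust = ~(uint32_t)0;

		if ((max * reciprocal) > adjust)
			adjust = ~(uint32_t)(reciprocal - 1);

		return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
	}

	int main(void)
	{
		unsigned int size;

		for (size = 1; size <= 65536; size++)
			if (txd_use_count(size) != div_round_up(size))
				printf("mismatch at %u\n", size);
		printf("done\n");	/* no mismatches expected */
		return 0;
	}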
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
@@ -183,10 +229,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
- void *hdr_buf;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -215,22 +259,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_RX_PS_ENABLED,
- __I40E_RX_16BYTE_DESC_ENABLED,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
- test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
- set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
- clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
@@ -249,16 +289,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
@@ -290,6 +321,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -313,9 +345,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -359,7 +389,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += TXD_USE_COUNT(size);
+ count += i40e_txd_use_count(size);
if (!nr_frags--)
break;
@@ -405,4 +435,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+ return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+ (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 301fe2b..97f96e0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -36,7 +36,7 @@
#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
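The new form of I40E_MASK fixes two classic macro hazards: without the cast, a mask shifted toward bit 31 is evaluated in signed int (undefined behaviour once it reaches the sign bit), and without parentheses an expression argument binds wrongly. A small demonstration of both, with MASK_OLD/MASK_NEW as illustrative names:

	#include <stdio.h>
	#include <stdint.h>

	#define MASK_OLD(mask, shift)	(mask << shift)			/* old form */
	#define MASK_NEW(mask, shift)	((uint32_t)(mask) << (shift))	/* fixed */

	int main(void)
	{
		/* 0x3 << 30 in signed int shifts into the sign bit (undefined
		 * behaviour); the u32 cast makes it a well-defined 0xc0000000
		 */
		printf("0x%08x\n", (unsigned)MASK_NEW(0x3, 30));

		/* precedence: MASK_OLD(0x1 | 0x2, 2) expands to
		 * (0x1 | (0x2 << 2)) == 0x9, not the intended 0xc
		 */
		printf("0x%08x\n", (unsigned)MASK_NEW(0x1 | 0x2, 2));
		return 0;
	}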
@@ -258,6 +258,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -522,6 +527,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1329,4 +1336,46 @@ enum i40e_reset_type {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director and flexible payload */
+#define I40E_FD_INSET_L3_SRC_SHIFT 47
+#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_L3_SRC_SHIFT)
+#define I40E_FD_INSET_L3_DST_SHIFT 35
+#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_L3_DST_SHIFT)
+#define I40E_FD_INSET_L4_SRC_SHIFT 34
+#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \
+ I40E_FD_INSET_L4_SRC_SHIFT)
+#define I40E_FD_INSET_L4_DST_SHIFT 33
+#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \
+ I40E_FD_INSET_L4_DST_SHIFT)
+#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31
+#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_VERIFY_TAG_SHIFT)
+
+#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17
+#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD50_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16
+#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD51_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15
+#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD52_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14
+#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD53_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13
+#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD54_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12
+#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD55_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11
+#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD56_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
+#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 3b9d203..f04ce6c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -80,7 +80,12 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -154,6 +159,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -162,8 +168,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -322,6 +328,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
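The trailing one-element arrays make these variable-length messages: the sender allocates sizeof(struct) plus the payload, minus the one element already counted in the struct. A sketch of that sizing idiom (build_rss_key_msg and the local struct layout are illustrative, not the driver's actual send path):

	#include <stdlib.h>
	#include <string.h>

	struct virtchnl_rss_key {
		unsigned short vsi_id;
		unsigned short key_len;
		unsigned char key[1];	/* really key_len packed bytes */
	};

	/* sketch: build a key message sized for key_len bytes */
	static struct virtchnl_rss_key *
	build_rss_key_msg(unsigned short vsi_id,
			  const unsigned char *key, unsigned short key_len)
	{
		size_t len = sizeof(struct virtchnl_rss_key) + key_len - 1;
		struct virtchnl_rss_key *msg = calloc(1, len);

		if (!msg)
			return NULL;
		msg->vsi_id = vsi_id;
		msg->key_len = key_len;
		memcpy(msg->key, key, key_len);
		return msg;
	}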
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index e657ecc..76ed97d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -67,8 +67,6 @@ struct i40e_vsi {
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 qs_handle;
- u8 *rss_hkey_user; /* User configured hash keys */
- u8 *rss_lut_user; /* User configured lookup table entries */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -82,9 +80,6 @@ struct i40e_vsi {
#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
-#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
-#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
#define I40EVF_RXBUFFER_2048 2048
#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
@@ -210,9 +205,6 @@ struct i40evf_adapter {
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
-#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
-#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
@@ -222,6 +214,8 @@ struct i40evf_adapter {
#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
+#define I40EVF_FLAG_PROMISC_ON BIT(15)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -239,8 +233,17 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
+/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
+#define I40EVF_FLAG_AQ_GET_HENA BIT(11)
+#define I40EVF_FLAG_AQ_SET_HENA BIT(12)
+#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13)
+#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14)
+#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
+#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
+#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
+#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
/* OS defined structs */
struct net_device *netdev;
@@ -256,10 +259,18 @@ struct i40evf_adapter {
bool netdev_registered;
bool link_up;
enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+ (_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+ 0)
+/* RSS by the PF should be preferred over RSS via other methods. */
+#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
+ (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -271,11 +282,16 @@ struct i40evf_adapter {
struct i40e_eth_stats current_stats;
struct i40e_vsi vsi;
u32 aq_wait_count;
+ /* RSS stuff */
+ u64 hena;
+ u16 rss_key_size;
+ u16 rss_lut_size;
+ u8 *rss_key;
+ u8 *rss_lut;
};
/* Ethtool Private Flags */
-#define I40EVF_PRIV_FLAGS_PS BIT(0)
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
@@ -314,11 +330,12 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter);
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
void i40evf_request_stats(struct i40evf_adapter *adapter);
void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_get_hena(struct i40evf_adapter *adapter);
+void i40evf_set_hena(struct i40evf_adapter *adapter);
+void i40evf_set_rss_key(struct i40evf_adapter *adapter);
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
- u16 lut_size);
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
- u16 lut_size);
+int i40evf_config_rss(struct i40evf_adapter *adapter);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index dd4430a..c9c202f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
-static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "packet-split",
-};
-
-#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
-
/**
* i40evf_get_settings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return I40EVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
- } else if (sset == ETH_SS_PRIV_FLAGS) {
- for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40evf_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
}
}
@@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -378,63 +363,6 @@ static int i40evf_set_coalesce(struct net_device *netdev,
}
/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- struct i40e_hw *hw = &adapter->hw;
- u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
- /* We always hash on IP src and dest addresses */
- cmd->data = RXH_IP_SRC | RXH_IP_DST;
-
- switch (cmd->flow_type) {
- case TCP_V4_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V4_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
-
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case IPV4_FLOW:
- break;
-
- case TCP_V6_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V6_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
-
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case IPV6_FLOW:
- break;
- default:
- cmd->data = 0;
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -454,145 +382,8 @@ static int i40evf_get_rxnfc(struct net_device *netdev,
ret = 0;
break;
case ETHTOOL_GRXFH:
- ret = i40evf_get_rss_hash_opts(adapter, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-/**
- * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
- struct ethtool_rxnfc *nfc)
-{
- struct i40e_hw *hw = &adapter->hw;
- u32 flags = adapter->vf_res->vf_offload_flags;
-
- u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
- /* RSS does not support anything other than hashing
- * to queues on src and dst IPs and ports
- */
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- /* We need at least the IP SRC and DEST fields for hashing */
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
-
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- } else {
- return -EINVAL;
- }
- break;
- case TCP_V6_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- } else {
- return -EINVAL;
- }
- break;
- case UDP_V4_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- } else {
- return -EINVAL;
- }
- break;
- case UDP_V6_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- } else {
- return -EINVAL;
- }
- break;
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case SCTP_V4_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
- break;
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
- break;
- case IPV4_FLOW:
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- case IPV6_FLOW:
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
- return -EINVAL;
- }
-
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
- i40e_flush(hw);
-
- return 0;
-}
-
-/**
- * i40evf_set_rxnfc - command to set RX flow classification rules
- * @netdev: network interface device structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the command is supported.
- **/
-static int i40evf_set_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ netdev_info(netdev,
+ "RSS hash info is not available to vf, use pf.\n");
break;
default:
break;
@@ -600,7 +391,6 @@ static int i40evf_set_rxnfc(struct net_device *netdev,
return ret;
}
-
/**
* i40evf_get_channels: get the number of channels supported by the device
* @netdev: network interface device structure
@@ -624,6 +414,19 @@ static void i40evf_get_channels(struct net_device *netdev,
}
/**
+ * i40evf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the key size.
+ **/
+static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_key_size;
+}
+
+/**
* i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
* @netdev: network interface device structure
*
@@ -631,7 +434,9 @@ static void i40evf_get_channels(struct net_device *netdev,
**/
static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
{
- return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_lut_size;
}
/**
@@ -646,9 +451,6 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- u8 *seed = NULL, *lut;
- int ret;
u16 i;
if (hfunc)
@@ -656,24 +458,13 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!indir)
return 0;
- seed = key;
-
- lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
- ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
- if (ret)
- goto out;
+ memcpy(key, adapter->rss_key, adapter->rss_key_size);
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
- indir[i] = (u32)lut[i];
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ indir[i] = (u32)adapter->rss_lut[i];
-out:
- kfree(lut);
-
- return ret;
+ return 0;
}
/**
@@ -689,8 +480,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- u8 *seed = NULL;
u16 i;
/* We do not allow change in unsupported parameters */
@@ -701,76 +490,14 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
if (key) {
- if (!vsi->rss_hkey_user) {
- vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE,
- GFP_KERNEL);
- if (!vsi->rss_hkey_user)
- return -ENOMEM;
- }
- memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE);
- seed = vsi->rss_hkey_user;
- }
- if (!vsi->rss_lut_user) {
- vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE,
- GFP_KERNEL);
- if (!vsi->rss_lut_user)
- return -ENOMEM;
+ memcpy(adapter->rss_key, key, adapter->rss_key_size);
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
-
- return i40evf_config_rss(vsi, seed, vsi->rss_lut_user,
- I40EVF_HLUT_ARRAY_SIZE);
-}
-
-/**
- * i40evf_get_priv_flags - report device private flags
- * @dev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the i40e_priv_flags_strings
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 i40evf_get_priv_flags(struct net_device *dev)
-{
- struct i40evf_adapter *adapter = netdev_priv(dev);
- u32 ret_flags = 0;
-
- ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
- I40EVF_PRIV_FLAGS_PS : 0;
-
- return ret_flags;
-}
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
-/**
- * i40evf_set_priv_flags - set private flags
- * @dev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
-{
- struct i40evf_adapter *adapter = netdev_priv(dev);
- bool reset_required = false;
-
- if ((flags & I40EVF_PRIV_FLAGS_PS) &&
- !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
- adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
- reset_required = true;
- } else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
- (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
- reset_required = true;
- }
-
- /* if needed, issue reset to cause things to take effect */
- if (reset_required)
- i40evf_schedule_reset(adapter);
-
- return 0;
+ return i40evf_config_rss(adapter);
}
static const struct ethtool_ops i40evf_ethtool_ops = {
@@ -782,18 +509,16 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count,
- .get_priv_flags = i40evf_get_priv_flags,
- .set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
- .set_rxnfc = i40evf_set_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
+ .get_rxfh_key_size = i40evf_get_rxfh_key_size,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4b70aae..642bb45 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 15
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i;
- int rx_buf_len;
-
-
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
- netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = I40EVF_RXBUFFER_2048;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- set_ring_ps_enabled(&adapter->rx_rings[i]);
- adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
- } else {
- clear_ring_ps_enabled(&adapter->rx_rings[i]);
- }
+ adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
}
}
@@ -943,6 +926,21 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
bottom_of_search_loop:
continue;
}
+
+ if (netdev->flags & IFF_PROMISC &&
+ !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
+ adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ else if (!(netdev->flags & IFF_PROMISC) &&
+ adapter->flags & I40EVF_FLAG_PROMISC_ON)
+ adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI &&
+ !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
+ adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ else if (!(netdev->flags & IFF_ALLMULTI) &&
+ adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
+ adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
+
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
@@ -999,14 +997,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = &adapter->rx_rings[i];
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- i40evf_alloc_rx_headers(ring);
- i40evf_alloc_rx_buffers_ps(ring, ring->count);
- } else {
- i40evf_alloc_rx_buffers_1buf(ring, ring->count);
- }
- ring->next_to_use = ring->count - 1;
- writel(ring->next_to_use, ring->tail);
+ i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
}
}
@@ -1224,24 +1215,18 @@ out:
}
/**
- * i40e_config_rss_aq - Prepare for RSS using AQ commands
- * @vsi: vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
+ * @adapter: board private structure
*
* Return 0 on success, negative on failure
**/
-static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
+static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_aqc_get_set_rss_key_data *rss_key =
+ (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
struct i40e_hw *hw = &adapter->hw;
int ret = 0;
- if (!vsi->id)
- return -EINVAL;
-
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
@@ -1249,198 +1234,82 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
return -EBUSY;
}
- if (seed) {
- struct i40e_aqc_get_set_rss_key_data *rss_key =
- (struct i40e_aqc_get_set_rss_key_data *)seed;
- ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key);
- if (ret) {
- dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
+ ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
}
- if (lut) {
- ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
+ ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
}
return ret;
}
/**
* i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
- const u8 *lut, u16 lut_size)
+static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
struct i40e_hw *hw = &adapter->hw;
+ u32 *dw;
u16 i;
- if (seed) {
- u32 *seed_dw = (u32 *)seed;
-
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
- }
-
- if (lut) {
- u32 *lut_dw = (u32 *)lut;
+ dw = (u32 *)adapter->rss_key;
+ for (i = 0; i < adapter->rss_key_size / 4; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
- if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
- return -EINVAL;
+ dw = (u32 *)adapter->rss_lut;
+ for (i = 0; i < adapter->rss_lut_size / 4; i++)
+ wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
- }
i40e_flush(hw);
return 0;
}
/**
- * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_hw *hw = &adapter->hw;
- int ret = 0;
-
- if (seed) {
- ret = i40evf_aq_get_rss_key(hw, vsi->id,
- (struct i40e_aqc_get_set_rss_key_data *)seed);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot get RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
-
- if (lut) {
- ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot get RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
-
- return ret;
-}
-
-/**
- * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
- const u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_hw *hw = &adapter->hw;
- u16 i;
-
- if (seed) {
- u32 *seed_dw = (u32 *)seed;
-
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i));
- }
-
- if (lut) {
- u32 *lut_dw = (u32 *)lut;
-
- if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i));
- }
-
- return 0;
-}
-
-/**
* i40evf_config_rss - Configure RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
-
- if (RSS_AQ(adapter))
- return i40evf_config_rss_aq(vsi, seed, lut, lut_size);
- else
- return i40evf_config_rss_reg(vsi, seed, lut, lut_size);
-}
-
-/**
- * i40evf_get_rss - Get RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size)
+int i40evf_config_rss(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
- if (RSS_AQ(adapter))
- return i40evf_get_rss_aq(vsi, seed, lut, lut_size);
- else
- return i40evf_get_rss_reg(vsi, seed, lut, lut_size);
+ if (RSS_PF(adapter)) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
+ I40EVF_FLAG_AQ_SET_RSS_KEY;
+ return 0;
+ } else if (RSS_AQ(adapter)) {
+ return i40evf_config_rss_aq(adapter);
+ } else {
+ return i40evf_config_rss_reg(adapter);
+ }
}
/**
* i40evf_fill_rss_lut - Fill the lut with default values
- * @lut: Lookup table to be filled with
- * @rss_table_size: Lookup table size
- * @rss_size: Range of queue number for hashing
+ * @adapter: board private structure
**/
-static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
{
u16 i;
- for (i = 0; i < rss_table_size; i++)
- lut[i] = i % rss_size;
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = i % adapter->num_active_queues;
}
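The default LUT above simply round-robins queue indices. A stand-alone sketch of the resulting pattern (sizes are illustrative, not taken from this driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned char lut[64];			/* rss_lut_size stand-in */
		unsigned int num_active_queues = 4, i;

		for (i = 0; i < sizeof(lut); i++)
			lut[i] = i % num_active_queues;

		for (i = 0; i < 8; i++)
			printf("%u ", lut[i]);		/* prints: 0 1 2 3 0 1 2 3 */
		printf("\n");
		return 0;
	}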
/**
@@ -1451,42 +1320,25 @@ static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
**/
static int i40evf_init_rss(struct i40evf_adapter *adapter)
{
- struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
- u8 seed[I40EVF_HKEY_ARRAY_SIZE];
- u64 hena;
- u8 *lut;
int ret;
- /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
- else
- hena = I40E_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ if (!RSS_PF(adapter)) {
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ if (adapter->vf_res->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+ else
+ adapter->hena = I40E_DEFAULT_RSS_HENA;
- lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ }
- /* Use user configured lut if there is one, otherwise use default */
- if (vsi->rss_lut_user)
- memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE);
- else
- i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE,
- adapter->num_active_queues);
+ i40evf_fill_rss_lut(adapter);
- /* Use user configured hash key if there is one, otherwise
- * user default.
- */
- if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE);
- else
- netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
- ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
- kfree(lut);
+ netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
+ ret = i40evf_config_rss(adapter);
return ret;
}
@@ -1507,7 +1359,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
GFP_KERNEL);
if (!adapter->q_vectors)
- goto err_out;
+ return -ENOMEM;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
@@ -1519,15 +1371,6 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
}
return 0;
-
-err_out:
- while (q_idx) {
- q_idx--;
- q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
- }
- kfree(adapter->q_vectors);
- return -ENOMEM;
}
/**
@@ -1610,19 +1453,16 @@ err_set_interrupt:
}
/**
- * i40evf_clear_rss_config_user - Clear user configurations of RSS
- * @vsi: Pointer to VSI structure
+ * i40evf_free_rss - Free memory used by RSS structs
+ * @adapter: board private structure
**/
-static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi)
+static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
- if (!vsi)
- return;
-
- kfree(vsi->rss_hkey_user);
- vsi->rss_hkey_user = NULL;
+ kfree(adapter->rss_key);
+ adapter->rss_key = NULL;
- kfree(vsi->rss_lut_user);
- vsi->rss_lut_user = NULL;
+ kfree(adapter->rss_lut);
+ adapter->rss_lut = NULL;
}
/**
@@ -1756,6 +1596,39 @@ static void i40evf_watchdog_task(struct work_struct *work)
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
+ i40evf_get_hena(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
+ i40evf_set_hena(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
+ i40evf_set_rss_key(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
+ i40evf_set_rss_lut(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
+ i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
+ I40E_FLAG_VF_MULTICAST_PROMISC);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
+ i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+ goto watchdog_done;
+ }
+
+ if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
+ (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+ i40evf_set_promiscuous(adapter, 0);
+ goto watchdog_done;
+ }
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
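Each of the blocks added above follows the same deferred-work shape: the watchdog issues at most one admin-queue request per pass, because the VF can only have a single virtchnl command outstanding at a time (see the "command %d pending" bail-outs further down). A minimal sketch with invented flag and handler names:

	enum { FLAG_A = 1u << 0, FLAG_B = 1u << 1 };

	static void do_a(unsigned int *req) { *req &= ~FLAG_A; }
	static void do_b(unsigned int *req) { *req &= ~FLAG_B; }

	static void watchdog_pass(unsigned int *aq_required)
	{
		if (*aq_required & FLAG_A) {
			do_a(aq_required);
			return;			/* "goto watchdog_done" above */
		}
		if (*aq_required & FLAG_B) {
			do_b(aq_required);
			return;
		}
	}

	int main(void)
	{
		unsigned int aq_required = FLAG_A | FLAG_B;

		watchdog_pass(&aq_required);	/* drains FLAG_A only */
		watchdog_pass(&aq_required);	/* then FLAG_B */
		return aq_required;		/* 0 once all work is done */
	}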
@@ -2003,6 +1876,8 @@ static void i40evf_adminq_task(struct work_struct *work)
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
+ if (val == 0xdeadbeef) /* indicates device in reset */
+ goto freedom;
oldval = val;
if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
@@ -2259,6 +2134,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
+ NETIF_F_HW_VLAN_CTAG_RX |\
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+
+/**
+ * i40evf_fix_features - fix up the netdev feature bits
+ * @netdev: our net device
+ * @features: desired feature bits
+ *
+ * Returns fixed-up feature bits
+ **/
+static netdev_features_t i40evf_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ features &= ~I40EVF_VLAN_FEATURES;
+ if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
+ features |= I40EVF_VLAN_FEATURES;
+ return features;
+}
+
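For context on the hunk above, .ndo_fix_features is invoked whenever the feature set is (re)computed, for example on an ethtool -K request; a hypothetical invocation:

	/* "ethtool -K eth0 rxvlan on" lands here; if the PF never granted
	 * I40E_VIRTCHNL_VF_OFFLOAD_VLAN, the mask-out above strips the
	 * request again, so userspace cannot enable an offload the VF
	 * does not actually have.
	 */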
static const struct net_device_ops i40evf_netdev_ops = {
.ndo_open = i40evf_open,
.ndo_stop = i40evf_close,
@@ -2271,6 +2168,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_tx_timeout = i40evf_tx_timeout,
.ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+ .ndo_fix_features = i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
#endif
@@ -2307,57 +2205,61 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
**/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
+ struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
+ struct i40e_vsi *vsi = &adapter->vsi;
int i;
/* got VF config message back from PF, now we can parse it */
- for (i = 0; i < adapter->vf_res->num_vsis; i++) {
- if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
- adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ for (i = 0; i < vfres->num_vsis; i++) {
+ if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &vfres->vsi_res[i];
}
if (!adapter->vsi_res) {
dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
return -ENODEV;
}
- if (adapter->vf_res->vf_offload_flags
- & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
- netdev->vlan_features = netdev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
- netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- }
- netdev->features |= NETIF_F_HIGHDMA |
- NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_RXCSUM |
- NETIF_F_GRO;
-
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features &= ~NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPIP |
+ NETIF_F_GSO_SIT |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+ if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= netdev->hw_enc_features |
+ NETIF_F_TSO_MANGLEID;
+
+ /* Write features and hw_features separately to avoid polluting
+ * with, or dropping, features that are set when we registered.
+ */
+ netdev->hw_features |= netdev->hw_enc_features;
+
+ netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+
+ /* disable VLAN features if not supported */
+ if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
+ netdev->features ^= I40EVF_VLAN_FEATURES;
adapter->vsi.id = adapter->vsi_res->vsi_id;
@@ -2368,8 +2270,16 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- adapter->vsi.netdev = adapter->netdev;
- adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
+ vsi->netdev = adapter->netdev;
+ vsi->qs_handle = adapter->vsi_res->qset_handle;
+ if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ adapter->rss_key_size = vfres->rss_key_size;
+ adapter->rss_lut_size = vfres->rss_lut_size;
+ } else {
+ adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
+ adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
+ }
+
return 0;
}
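One detail worth keeping in mind when reading the RSS hunks earlier in this file: the key/LUT buffer sizes now come from one of two places.

	/* With RSS_PF the PF dictates rss_key_size/rss_lut_size via the VF
	 * resource message; otherwise they fall back to the VF register
	 * file (I40EVF_HKEY_ARRAY_SIZE / I40EVF_HLUT_ARRAY_SIZE). The
	 * register counts behind those macros (13 and 16 dwords) are
	 * assumed from the i40e VF register layout, not stated in this
	 * diff.
	 */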
@@ -2502,11 +2412,6 @@ static void i40evf_init_task(struct work_struct *work)
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
- adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
- adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
-
- /* Default to single buffer rx, can be changed through ethtool. */
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
@@ -2565,6 +2470,11 @@ static void i40evf_init_task(struct work_struct *work)
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
+ adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+ adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+ if (!adapter->rss_key || !adapter->rss_lut)
+ goto err_mem;
+
if (RSS_AQ(adapter)) {
adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
@@ -2575,7 +2485,8 @@ static void i40evf_init_task(struct work_struct *work)
restart:
schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
return;
-
+err_mem:
+ i40evf_free_rss(adapter);
err_register:
i40evf_free_misc_irq(adapter);
err_sw_init:
@@ -2838,11 +2749,11 @@ static void i40evf_remove(struct pci_dev *pdev)
adapter->state = __I40EVF_REMOVE;
adapter->aq_required = 0;
i40evf_request_reset(adapter);
- msleep(20);
+ msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
if (!i40evf_asq_done(hw)) {
i40evf_request_reset(adapter);
- msleep(20);
+ msleep(50);
}
if (adapter->msix_entries) {
@@ -2857,8 +2768,7 @@ static void i40evf_remove(struct pci_dev *pdev)
flush_scheduled_work();
- /* Clear user configurations for RSS */
- i40evf_clear_rss_config_user(&adapter->vsi);
+ i40evf_free_rss(adapter);
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
@@ -2869,7 +2779,6 @@ static void i40evf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_regions(pdev);
-
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 488e738..f134456 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- vqpi->rxq.splithdr_enabled = true;
- vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
- }
vqpi++;
}
@@ -645,6 +641,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
struct i40e_virtchnl_promisc_info vpi;
+ int promisc_all;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -652,6 +649,27 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
adapter->current_op);
return;
}
+
+ promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
+ I40E_FLAG_VF_MULTICAST_PROMISC;
+ if ((flags & promisc_all) == promisc_all) {
+ adapter->flags |= I40EVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ }
+
+ if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+ adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ }
+
+ if (!flags) {
+ adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ }
+
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
@@ -681,6 +699,115 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
/* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
+
+/**
+ * i40evf_get_hena
+ * @adapter: adapter structure
+ *
+ * Request hash enable capabilities from PF
+ **/
+void i40evf_get_hena(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ NULL, 0);
+}
+
+/**
+ * i40evf_set_hena
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash capabilities
+ **/
+void i40evf_set_hena(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_hena vrh;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ vrh.hena = adapter->hena;
+ adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ (u8 *)&vrh, sizeof(vrh));
+}
+
+/**
+ * i40evf_set_rss_key
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash key
+ **/
+void i40evf_set_rss_key(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_key *vrk;
+ int len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_key) +
+ (adapter->rss_key_size * sizeof(u8)) - 1;
+ vrk = kzalloc(len, GFP_KERNEL);
+ if (!vrk)
+ return;
+ vrk->vsi_id = adapter->vsi.id;
+ vrk->key_len = adapter->rss_key_size;
+ memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
+
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ (u8 *)vrk, len);
+ kfree(vrk);
+}
+
+/**
+ * i40evf_set_rss_lut
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS lookup table
+ **/
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_lut *vrl;
+ int len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_lut) +
+ (adapter->rss_lut_size * sizeof(u8)) - 1;
+ vrl = kzalloc(len, GFP_KERNEL);
+ if (!vrl)
+ return;
+ vrl->vsi_id = adapter->vsi.id;
+ vrl->lut_entries = adapter->rss_lut_size;
+ memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ (u8 *)vrl, len);
+ kfree(vrl);
+}
+
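Both i40evf_set_rss_key() and i40evf_set_rss_lut() above size their messages as sizeof(struct) + n - 1 because the virtchnl structs end in a one-element array. A user-space sketch of the same pattern (the struct and names are stand-ins, not the virtchnl definitions):

	#include <stdlib.h>
	#include <string.h>

	struct rss_key_msg {
		unsigned short vsi_id;
		unsigned short key_len;
		unsigned char key[1];		/* really key_len bytes */
	};

	static struct rss_key_msg *build_key_msg(const unsigned char *key,
						 unsigned short n, size_t *len)
	{
		struct rss_key_msg *m;

		*len = sizeof(*m) + n - 1;	/* one byte already counted */
		m = calloc(1, *len);
		if (!m)
			return NULL;
		m->key_len = n;
		memcpy(m->key, key, n);
		return m;
	}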
/**
* i40evf_request_reset
* @adapter: adapter structure
@@ -820,6 +947,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+ struct i40e_virtchnl_rss_hena *vrh =
+ (struct i40e_virtchnl_rss_hena *)msg;
+ if (msglen == sizeof(*vrh))
+ adapter->hena = vrh->hena;
+ else
+ dev_warn(&adapter->pdev->dev,
+ "Invalid message %d from PF\n", v_opcode);
+ }
+ break;
default:
if (v_opcode != adapter->current_op)
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a23aa67..a61447f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -361,7 +361,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
if (size > 15)
size = 15;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
@@ -380,7 +380,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
16 : 8;
break;
}
- if (nvm->word_size == (1 << 15))
+ if (nvm->word_size == BIT(15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
@@ -391,7 +391,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
nvm->ops.write = igb_write_nvm_spi;
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
- if (nvm->word_size < (1 << 15))
+ if (nvm->word_size < BIT(15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
@@ -2107,7 +2107,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
/* The PF can spoof - it has to in order to
* support emulation mode NICs
*/
- reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
} else {
reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
E1000_DTXSWC_VLAN_SPOOF_MASK);
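The (1 << n) to BIT(n) conversions running through the igb hunks below are not purely cosmetic.

	/* From include/linux/bitops.h:
	 *
	 *	#define BIT(nr)	(1UL << (nr))
	 *
	 * Shifting 1UL instead of a plain int keeps BIT(31), e.g.
	 * E1000_DTXSWC_VMDQ_LOOPBACK_EN further down, away from the
	 * undefined behaviour of shifting into the sign bit of int.
	 */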
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index de8805a..199ff98 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -168,16 +168,16 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
@@ -186,8 +186,8 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
/* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE (1 << 26)
-#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_FILTER_ENABLE BIT(26)
+#define E1000_ETQF_1588 BIT(30)
/* FTQF register bit definitions */
#define E1000_FTQF_VF_BP 0x00008000
@@ -203,16 +203,16 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
/* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+#define E1000_VT_CTL_IGNORE_MAC BIT(28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29)
+#define E1000_VT_CTL_VM_REPL_EN BIT(30)
/* Per VM Offload register setup */
#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
@@ -252,7 +252,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXCTL_MDP_EN 0x0020
#define E1000_DTXCTL_SPOOF_INT 0x0040
-#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14)
#define ALL_QUEUES 0xFFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index e9f23ee..2997c44 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -530,65 +530,65 @@
/* Time Sync Interrupt Cause/Mask Register Bits */
-#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
-#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
-#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
-#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
-#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
-#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
-#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
-#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
+#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */
+#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */
+#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */
#define TSYNC_INTERRUPTS TSINTR_TXTS
#define E1000_TSICR_TXTS TSINTR_TXTS
/* TSAUXC Configuration Bits */
-#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
-#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
-#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
-#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
-#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
-#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
-#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
-#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
-#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
-#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
+#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */
+#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */
+#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */
+#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */
/* SDP Configuration Bits */
-#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
-#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
-#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
-#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
-#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
-#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
-#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
-#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
-#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
-#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
-#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
-#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
-#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
-#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
-#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
-#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
-#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
-#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
-#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
-#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
-#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
-#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
-#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
-#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
+#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
@@ -997,8 +997,8 @@
#define E1000_M88E1543_FIBER_CTRL 0x0
#define E1000_EEE_ADV_DEV_I354 7
#define E1000_EEE_ADV_ADDR_I354 60
-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */
#define E1000_PCS_STATUS_DEV_I354 3
#define E1000_PCS_STATUS_ADDR_I354 1
#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 07cf4fe..5010e22 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -212,7 +212,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
- vfta_delta = 1 << (vlan % 32);
+ vfta_delta = BIT(vlan % 32);
vfta = adapter->shadow_vfta[regidx];
/* vfta_delta represents the difference between the current value
@@ -243,12 +243,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
bits = rd32(E1000_VLVF(vlvf_index));
/* set the pool bit */
- bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
- bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
/* Clear VFTA first, then disable VLVF. Otherwise
@@ -427,7 +427,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
mta = array_rd32(E1000_MTA, hash_reg);
- mta |= (1 << hash_bit);
+ mta |= BIT(hash_bit);
array_wr32(E1000_MTA, hash_reg, mta);
wrfl();
@@ -527,7 +527,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += (ETH_ALEN);
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 10f5c9e..00e263f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -302,9 +302,9 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
u32 vflre = rd32(E1000_VFLRE);
s32 ret_val = -E1000_ERR_MBX;
- if (vflre & (1 << vf_number)) {
+ if (vflre & BIT(vf_number)) {
ret_val = 0;
- wr32(E1000_VFLRE, (1 << vf_number));
+ wr32(E1000_VFLRE, BIT(vf_number));
hw->mbx.stats.rsts++;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index e8280d0..3582c5c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -72,7 +72,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
u32 eecd = rd32(E1000_EECD);
u32 mask;
- mask = 0x01 << (count - 1);
+ mask = 1u << (count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 969a6dd..9b622b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -91,10 +91,10 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define I82580_ADDR_REG 16
#define I82580_CFG_REG 22
-#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15)
+#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */
#define I82580_CTRL_REG 23
-#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
+#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10)
/* 82580 specific PHY registers */
#define I82580_PHY_CTRL_2 18
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9413fa6..b9609af 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -91,6 +91,14 @@ struct igb_adapter;
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGB_I210_TX_LATENCY_10 9542
+#define IGB_I210_TX_LATENCY_100 1024
+#define IGB_I210_TX_LATENCY_1000 178
+#define IGB_I210_RX_LATENCY_10 20662
+#define IGB_I210_RX_LATENCY_100 2213
+#define IGB_I210_RX_LATENCY_1000 448
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -169,7 +177,7 @@ enum igb_tx_flags {
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
@@ -466,21 +474,21 @@ struct igb_adapter {
u16 eee_advert;
};
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
-#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
-#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
-#define IGB_FLAG_MEDIA_RESET (1 << 10)
-#define IGB_FLAG_MAS_CAPABLE (1 << 11)
-#define IGB_FLAG_MAS_ENABLE (1 << 12)
-#define IGB_FLAG_HAS_MSIX (1 << 13)
-#define IGB_FLAG_EEE (1 << 14)
+#define IGB_FLAG_HAS_MSI BIT(0)
+#define IGB_FLAG_DCA_ENABLED BIT(1)
+#define IGB_FLAG_QUAD_PORT_A BIT(2)
+#define IGB_FLAG_QUEUE_PAIRS BIT(3)
+#define IGB_FLAG_DMAC BIT(4)
+#define IGB_FLAG_PTP BIT(5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
+#define IGB_FLAG_WOL_SUPPORTED BIT(8)
+#define IGB_FLAG_NEED_LINK_UPDATE BIT(9)
+#define IGB_FLAG_MEDIA_RESET BIT(10)
+#define IGB_FLAG_MAS_CAPABLE BIT(11)
+#define IGB_FLAG_MAS_ENABLE BIT(12)
+#define IGB_FLAG_HAS_MSIX BIT(13)
+#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
/* Media Auto Sense */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7982243..64e91c5 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -466,7 +466,7 @@ static void igb_get_regs(struct net_device *netdev,
memset(p, 0, IGB_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = rd32(E1000_CTRL);
@@ -1448,7 +1448,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 31; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (!(mask & ics_mask))
continue;
@@ -2411,19 +2411,19 @@ static int igb_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
- info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
/* 82576 does not support timestamping all packets. */
if (adapter->hw.mac.type >= e1000_82580)
- info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
else
info->rx_filters |=
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
default:
@@ -2831,7 +2831,8 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
+ status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2,
+ &dataword[i]);
if (status) {
/* Error occurred while reading module */
kfree(dataword);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 55a1405c..2172769 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -50,6 +50,7 @@
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
+#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -150,7 +151,7 @@ static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
@@ -382,7 +383,7 @@ static void igb_dump(struct igb_adapter *adapter)
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
+ netdev->state, dev_trans_start(netdev), netdev->last_rx);
}
/* Print Registers */
@@ -835,7 +836,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
igb_write_ivar(hw, msix_vector,
tx_queue & 0x7,
((tx_queue & 0x8) << 1) + 8);
- q_vector->eims_value = 1 << msix_vector;
+ q_vector->eims_value = BIT(msix_vector);
break;
case e1000_82580:
case e1000_i350:
@@ -856,7 +857,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
igb_write_ivar(hw, msix_vector,
tx_queue >> 1,
((tx_queue & 0x1) << 4) + 8);
- q_vector->eims_value = 1 << msix_vector;
+ q_vector->eims_value = BIT(msix_vector);
break;
default:
BUG();
@@ -918,7 +919,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
E1000_GPIE_NSICR);
/* enable msix_other interrupt */
- adapter->eims_other = 1 << vector;
+ adapter->eims_other = BIT(vector);
tmp = (vector++ | E1000_IVAR_VALID) << 8;
wr32(E1000_IVAR_MISC, tmp);
@@ -2086,6 +2087,40 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
+#define IGB_MAX_MAC_HDR_LEN 127
+#define IGB_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+igb_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
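When igb_features_check() finds a header too long for a context descriptor it does not drop the packet; it strips the offload bits so the stack falls back to software checksumming/GSO for that skb. A reduced sketch of that degradation (types and helper name invented, the limit taken from the code above):

	typedef unsigned long long features_t;

	#define F_CSUM	(1ULL << 0)
	#define F_TSO	(1ULL << 1)

	static features_t clamp_offloads(unsigned int mac_hdr_len,
					 features_t features)
	{
		if (mac_hdr_len > 127)		/* IGB_MAX_MAC_HDR_LEN */
			return features & ~(F_CSUM | F_TSO); /* sw fallback */
		return features;
	}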
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2110,7 +2145,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = igb_features_check,
};
/**
@@ -2376,38 +2411,43 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_TSO6 |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_CSUM;
if (hw->mac.type >= e1000_82576)
netdev->features |= NETIF_F_SCTP_CRC;
+#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPIP | \
+ NETIF_F_GSO_SIT | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
+
/* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXALL;
if (hw->mac.type >= e1000_i350)
netdev->hw_features |= NETIF_F_NTUPLE;
- /* set this bit last since it cannot be part of hw_features */
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_HW_CSUM |
- NETIF_F_SCTP_CRC;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
- netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -2442,9 +2482,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* copy the MAC address out of the NVM */
- if (hw->mac.ops.read_mac_addr(hw))
- dev_err(&pdev->dev, "NVM Read Error\n");
+ if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
+ /* copy the MAC address out of the NVM */
+ if (hw->mac.ops.read_mac_addr(hw))
+ dev_err(&pdev->dev, "NVM Read Error\n");
+ }
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
@@ -4061,7 +4103,7 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
u32 vlvf = rd32(E1000_VLVF(i));
- vlvf |= 1 << pf_id;
+ vlvf |= BIT(pf_id);
wr32(E1000_VLVF(i), vlvf);
}
@@ -4088,7 +4130,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
/* guarantee that we don't scrub out management VLAN */
vid = adapter->mng_vlan_id;
if (vid >= vid_start && vid < vid_end)
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
if (!adapter->vfs_allocated_count)
goto set_vfta;
@@ -4107,7 +4149,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
if (vlvf & E1000_VLVF_VLANID_ENABLE) {
/* record VLAN ID in VFTA */
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
@@ -4115,7 +4157,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
}
/* remove PF from the pool */
- bits = ~(1 << pf_id);
+ bits = ~BIT(pf_id);
bits &= rd32(E1000_VLVF(i));
wr32(E1000_VLVF(i), bits);
}
@@ -4273,13 +4315,13 @@ static void igb_spoof_check(struct igb_adapter *adapter)
return;
for (j = 0; j < adapter->vfs_allocated_count; j++) {
- if (adapter->wvbr & (1 << j) ||
- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ if (adapter->wvbr & BIT(j) ||
+ adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
dev_warn(&adapter->pdev->dev,
"Spoof event(s) detected on VF %d\n", j);
adapter->wvbr &=
- ~((1 << j) |
- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ ~(BIT(j) |
+ BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
}
}
}
@@ -4839,9 +4881,18 @@ static int igb_tso(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -4854,45 +4905,52 @@ static int igb_tso(struct igb_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM |
IGB_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* MSS L4LEN IDX */
- mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
/* VLAN MACLEN IPLEN */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
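The csum_replace_by_diff() call above backs the payload length out of the pseudo-header checksum the stack seeded, since TSO hardware re-adds the per-segment length itself. The ones-complement identity behind it, as a stand-alone sketch (this mirrors the arithmetic idea only, not the kernel helper's exact folded/unfolded conventions):

	#include <assert.h>

	static unsigned int fold16(unsigned int sum)
	{
		while (sum >> 16)			/* end-around carry */
			sum = (sum & 0xffff) + (sum >> 16);
		return sum;
	}

	/* Removing a 16-bit term from a ones-complement sum is adding
	 * its complement.
	 */
	static unsigned int csum_sub(unsigned int sum, unsigned int term)
	{
		return fold16(sum + (~term & 0xffff));
	}

	int main(void)
	{
		unsigned int hdr = 0x1234, len = 0x0055;
		unsigned int sum = fold16(hdr + len);

		assert(csum_sub(sum, len) == hdr);
		return 0;
	}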
@@ -5960,11 +6018,11 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
/* create mask for VF and other pools */
pool_mask = E1000_VLVF_POOLSEL_MASK;
- vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+ vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
/* drop PF from pool bits */
- pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
- adapter->vfs_allocated_count));
+ pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
/* Find the vlan filter for this id */
for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
@@ -5987,7 +6045,7 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
goto update_vlvf;
vid = vlvf & E1000_VLVF_VLANID_MASK;
- vfta_mask = 1 << (vid % 32);
+ vfta_mask = BIT(vid % 32);
/* clear bit from VFTA */
vfta = adapter->shadow_vfta[vid / 32];
@@ -6024,7 +6082,7 @@ static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
return idx;
}
-void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
+static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
struct e1000_hw *hw = &adapter->hw;
u32 bits, pf_id;
@@ -6038,13 +6096,13 @@ void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
* entry other than the PF.
*/
pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
- bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
+ bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
bits &= rd32(E1000_VLVF(idx));
/* Disable the filter so this falls into the default pool. */
if (!bits) {
if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
- wr32(E1000_VLVF(idx), 1 << pf_id);
+ wr32(E1000_VLVF(idx), BIT(pf_id));
else
wr32(E1000_VLVF(idx), 0);
}
@@ -6228,9 +6286,9 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
/* enable transmit and receive for vf */
reg = rd32(E1000_VFTE);
- wr32(E1000_VFTE, reg | (1 << vf));
+ wr32(E1000_VFTE, reg | BIT(vf));
reg = rd32(E1000_VFRE);
- wr32(E1000_VFRE, reg | (1 << vf));
+ wr32(E1000_VFRE, reg | BIT(vf));
adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
@@ -6522,13 +6580,14 @@ static int igb_poll(struct napi_struct *napi, int budget)
igb_update_dca(q_vector);
#endif
if (q_vector->tx.ring)
- clean_complete = igb_clean_tx_irq(q_vector);
+ clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
int cleaned = igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
- clean_complete &= (cleaned < budget);
+ if (cleaned >= budget)
+ clean_complete = false;
}
/* If all work not completed, return budget and keep polling */
@@ -6545,10 +6604,11 @@ static int igb_poll(struct napi_struct *napi, int budget)
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
+ * @napi_budget: Used to determine if we are in netpoll
*
* returns true if ring is completely cleaned
**/
-static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx.ring;
@@ -6587,7 +6647,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -7574,7 +7634,6 @@ static int igb_resume(struct device *dev)
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- rtnl_unlock();
return -ENOMEM;
}
@@ -7845,11 +7904,13 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
struct e1000_hw *hw = &adapter->hw;
u32 rar_low, rar_high;
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to CPU endian
+ /* HW expects these to be in network order when they are plugged
+ * into the registers, which are little endian. Do an leXX_to_cpup
+ * here so the value is ready for the byteswap that occurs with
+ * writel.
*/
- rar_low = le32_to_cpup((__be32 *)(addr));
- rar_high = le16_to_cpup((__be16 *)(addr + 4));
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
/* Indicate to hardware the Address is Valid. */
rar_high |= E1000_RAH_AV;
@@ -7921,7 +7982,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
/* Calculate the rate factor values to set */
rf_int = link_speed / tx_rate;
rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
+ rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
@@ -8011,11 +8072,11 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
reg_val = rd32(reg_offset);
if (setting)
- reg_val |= ((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ reg_val |= (BIT(vf) |
+ BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
else
- reg_val &= ~((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ reg_val &= ~(BIT(vf) |
+ BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 22a8a29..f097c5a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,9 +69,9 @@
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
-#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
+#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
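[Annotation] The GENMASK() conversion above is an identity: GENMASK(h, l) sets bits l through h, so the old and new mask definitions agree:

	/* GENMASK(n - 1, 0) == (1 << n) - 1, hence:
	 *   (1 << E1000_TIMINCA_16NS_SHIFT) - 1
	 *     == GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
	 * i.e. the mask of everything below the increment-period bit.
	 */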
@@ -722,11 +722,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
+ int adjust = 0;
regval = rd32(E1000_TXSTMPL);
regval |= (u64)rd32(E1000_TXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ /* adjust timestamp for the TX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_TX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_TX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_TX_LATENCY_1000;
+ break;
+ }
+ }
+
+ shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
@@ -771,6 +789,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
+ int adjust = 0;
/* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -790,6 +809,23 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_RX_LATENCY_1000;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
/* Update the last_rx_timestamp timer in order to enable watchdog check
* for error case of latched timestamp on a dropped packet.
*/
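[Annotation] The TX and RX hunks above duplicate the same per-speed switch, feeding ktime_sub_ns() on the TX side and ktime_add_ns() on the RX side. A minimal helper consolidating the lookup might look like this (a sketch, not in this patch; the IGB_I210_*_LATENCY_* constants are the driver's per-speed nanosecond values):

	static int igb_i210_link_latency(int link_speed, bool tx)
	{
		switch (link_speed) {
		case SPEED_10:
			return tx ? IGB_I210_TX_LATENCY_10 :
				    IGB_I210_RX_LATENCY_10;
		case SPEED_100:
			return tx ? IGB_I210_TX_LATENCY_100 :
				    IGB_I210_RX_LATENCY_100;
		case SPEED_1000:
			return tx ? IGB_I210_TX_LATENCY_1000 :
				    IGB_I210_RX_LATENCY_1000;
		default:
			return 0;
		}
	}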
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index ae3f283..ee1ef08 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -113,7 +113,7 @@
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */
/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index b74ce53..8dea1b1 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -154,7 +154,8 @@ static void igbvf_get_regs(struct net_device *netdev,
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ regs->version = (1u << 24) |
+ (adapter->pdev->revision << 16) |
adapter->pdev->device;
regs_buff[0] = er32(CTRL);
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index f166baa..6f4290d 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -287,8 +287,8 @@ struct igbvf_info {
};
/* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED BIT(0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP BIT(1)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index c124422..322a2d7 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -964,7 +964,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
ivar = ivar & 0xFFFFFF00;
ivar |= msix_vector | E1000_IVAR_VALID;
}
- adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+ adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
array_ew32(IVAR0, index, ivar);
}
if (tx_queue > IGBVF_NO_QUEUE) {
@@ -979,7 +979,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
ivar = ivar & 0xFFFF00FF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
}
- adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+ adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
array_ew32(IVAR0, index, ivar);
}
}
@@ -1014,8 +1014,8 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter)
ew32(IVAR_MISC, tmp);
- adapter->eims_enable_mask = (1 << (vector)) - 1;
- adapter->eims_other = 1 << (vector - 1);
+ adapter->eims_enable_mask = GENMASK(vector - 1, 0);
+ adapter->eims_other = BIT(vector - 1);
e1e_flush();
}
@@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct igbvf_ring *rx_ring = adapter->rx_ring;
u64 rdba;
- u32 rdlen, rxdctl;
+ u32 rxdctl;
/* disable receives */
rxdctl = er32(RXDCTL(0));
@@ -1375,8 +1375,6 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
e1e_flush();
msleep(10);
- rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
@@ -1933,83 +1931,74 @@ static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
buffer_info->dma = 0;
}
-static int igbvf_tso(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
- __be16 protocol)
-{
- struct e1000_adv_tx_context_desc *context_desc;
- struct igbvf_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
- u32 mss_l4len_idx, l4len;
- unsigned int i;
+static int igbvf_tso(struct igbvf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
- *hdr_len = 0;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
err = skb_cow_head(skb, 0);
- if (err < 0) {
- dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+ if (err < 0)
return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
- i = tx_ring->next_to_use;
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IGBVF_TX_FLAGS_VLAN)
- info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- info |= (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(info);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+ ip.v4->tot_len = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
- if (protocol == htons(ETH_P_IP))
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
- /* MSS L4LEN IDX */
- mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
- context_desc->seqnum_seed = 0;
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
- buffer_info->time_stamp = jiffies;
- buffer_info->dma = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
- tx_ring->next_to_use = i;
+ igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
- return true;
+ return 1;
}
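[Annotation] The rewritten igbvf_tso() follows the modern Intel TSO shape: instead of zeroing checksums and reseeding them with csum_tcpudp_magic(), it backs the payload length out of the pseudo-header checksum the stack already seeded (hardware re-inserts per-segment lengths) and derives the header length from actual offsets. The central steps, restated (illustrative):

	/* hdr_len = L2 + L3 + L4 header bytes in front of the payload */
	l4_offset = l4.hdr - skb->data;
	*hdr_len  = (l4.tcp->doff * 4) + l4_offset;

	/* the seeded pseudo-header checksum must not cover the payload
	 * length (it varies per segment), so remove it
	 */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));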
static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
@@ -2091,7 +2080,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
}
#define IGBVF_MAX_TXD_PWR 16
-#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
@@ -2271,8 +2260,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = skb_is_gso(skb) ?
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
+ tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
if (unlikely(tso < 0)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2615,6 +2603,40 @@ static int igbvf_set_features(struct net_device *netdev,
return 0;
}
+#define IGBVF_MAX_MAC_HDR_LEN 127
+#define IGBVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
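[Annotation] The new .ndo_features_check exists because the advanced context descriptor encodes the MAC and network header lengths in fixed-width fields; a frame whose headers exceed what those fields can express (here, 127 and 511 bytes) must have the offloads masked off so the stack does the work in software. The spans being validated (illustrative):

	/*  skb->data        skb_network_header()      skb_checksum_start()
	 *      |<-- mac_hdr_len -->|<---- network_hdr_len ---->|
	 * If either span overflows its descriptor field, strip checksum,
	 * TSO (and VLAN insertion for the MAC case) from this skb.
	 */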
static const struct net_device_ops igbvf_netdev_ops = {
.ndo_open = igbvf_open,
.ndo_stop = igbvf_close,
@@ -2631,7 +2653,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_poll_controller = igbvf_netpoll,
#endif
.ndo_set_features = igbvf_set_features,
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = igbvf_features_check,
};
/**
@@ -2739,22 +2761,30 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPIP | \
+ NETIF_F_GSO_SIT | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+ IGBVF_GSO_PARTIAL_FEATURES;
+
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_HW_CSUM |
- NETIF_F_SCTP_CRC;
-
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
+
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
/* reset the controller to put the device in a known good state */
err = hw->mac.ops.reset_hw(hw);
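[Annotation] The feature rework above follows the GSO partial pattern introduced elsewhere in this series; the semantics, paraphrased:

	/* For a GSO type listed only in gso_partial_features, the core
	 * segments the inner payload and replicates all headers, leaving
	 * just the outer header fields (lengths, IP ID) for the device to
	 * fix up per segment. NETIF_F_TSO_MANGLEID signals that the device
	 * may rewrite the IP ID rather than preserve an incrementing
	 * sequence, which is what makes tunneled IPv4 TSO acceptable.
	 */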
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index a13baa9..335ba66 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -266,7 +266,7 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
msgbuf[1] = vid;
/* Setting the 8 bit field MSG INFO to true indicates "add" */
if (set)
- msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT);
mbx->ops.write_posted(hw, msgbuf, 2);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e4949af..9f2db18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -143,14 +143,11 @@ struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
- u16 default_vf_vlan_id;
- u16 vlans_enabled;
bool clear_to_send;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
- u16 vlan_count;
u8 spoofchk_enabled;
bool rss_query_enabled;
u8 trusted;
@@ -173,7 +170,7 @@ struct vf_macvlans {
};
#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
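[Annotation] The 1u suffix only fixes the signedness of the shift; the descriptor math is unchanged. Worked example:

	/* IXGBE_MAX_DATA_PER_TXD = 1u << 14 = 16384 bytes, so a
	 * 60000-byte buffer needs
	 *   TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384) = 4
	 * descriptors.
	 */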
@@ -456,7 +453,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
IXGBE_QV_STATE_POLL);
#ifdef BP_EXTENDED_STATS
if (rc != IXGBE_QV_STATE_IDLE)
- q_vector->tx.ring->stats.yields++;
+ q_vector->rx.ring->stats.yields++;
#endif
return rc == IXGBE_QV_STATE_IDLE;
}
@@ -623,44 +620,45 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
+#define IXGBE_FLAG_MSI_ENABLED BIT(1)
+#define IXGBE_FLAG_MSIX_ENABLED BIT(3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4)
+#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5)
+#define IXGBE_FLAG_RX_PS_ENABLED BIT(6)
+#define IXGBE_FLAG_DCA_ENABLED BIT(8)
+#define IXGBE_FLAG_DCA_CAPABLE BIT(9)
+#define IXGBE_FLAG_IMIR_ENABLED BIT(10)
+#define IXGBE_FLAG_MQ_CAPABLE BIT(11)
+#define IXGBE_FLAG_DCB_ENABLED BIT(12)
+#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13)
+#define IXGBE_FLAG_VMDQ_ENABLED BIT(14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19)
+#define IXGBE_FLAG_FCOE_CAPABLE BIT(20)
+#define IXGBE_FLAG_FCOE_ENABLED BIT(21)
+#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22)
+#define IXGBE_FLAG_SRIOV_ENABLED BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
+#define IXGBE_FLAG_DCB_CAPABLE BIT(27)
u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
+#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
+#define IXGBE_FLAG2_RSC_ENABLED BIT(1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5)
+#define IXGBE_FLAG2_RESET_REQUESTED BIT(6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
+#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
@@ -795,7 +793,7 @@ struct ixgbe_adapter {
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
#define IXGBE_MAX_LINK_HANDLE 10
- struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
+ struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
unsigned long tables;
/* maximum number of RETA entries among all devices supported by ixgbe
@@ -806,6 +804,8 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+
+ bool need_crosstalk_fix;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -817,6 +817,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
return IXGBE_MAX_RSS_INDICES;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -827,7 +828,7 @@ struct ixgbe_fdir_filter {
struct hlist_node fdir_node;
union ixgbe_atr_input filter;
u16 sw_idx;
- u16 action;
+ u64 action;
};
enum ixgbe_state_t {
@@ -860,13 +861,15 @@ enum ixgbe_boards {
board_X540,
board_X550,
board_X550EM_x,
+ board_x550em_a,
};
-extern struct ixgbe_info ixgbe_82598_info;
-extern struct ixgbe_info ixgbe_82599_info;
-extern struct ixgbe_info ixgbe_X540_info;
-extern struct ixgbe_info ixgbe_X550_info;
-extern struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_82598_info;
+extern const struct ixgbe_info ixgbe_82599_info;
+extern const struct ixgbe_info ixgbe_X540_info;
+extern const struct ixgbe_info ixgbe_X550_info;
+extern const struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_x550em_a_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif
@@ -893,8 +896,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
- u16 subdevice_id);
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+ u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index d8a9fb8..fb51be7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -792,7 +792,7 @@ mac_reset_top:
}
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
- gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
/*
@@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
if (vlan_on)
/* Turn on this VLAN id */
- bits |= (1 << bitindex);
+ bits |= BIT(bitindex);
else
/* Turn off this VLAN id */
- bits &= ~(1 << bitindex);
+ bits &= ~BIT(bitindex);
IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
return 0;
@@ -1160,7 +1160,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}
-static struct ixgbe_mac_operations mac_ops_82598 = {
+static const struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
.start_hw = &ixgbe_start_hw_82598,
@@ -1192,9 +1192,11 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.clear_vfta = &ixgbe_clear_vfta_82598,
.set_vfta = &ixgbe_set_vfta_82598,
.fc_enable = &ixgbe_fc_enable_82598,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
+ .init_swfw_sync = NULL,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
.prot_autoc_read = &prot_autoc_read_generic,
@@ -1203,7 +1205,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
@@ -1214,7 +1216,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
-static struct ixgbe_phy_operations phy_ops_82598 = {
+static const struct ixgbe_phy_operations phy_ops_82598 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82598,
@@ -1230,7 +1232,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.check_overtemp = &ixgbe_tn_check_overtemp,
};
-struct ixgbe_info ixgbe_82598_info = {
+const struct ixgbe_info ixgbe_82598_info = {
.mac = ixgbe_mac_82598EB,
.get_invariants = &ixgbe_get_invariants_82598,
.mac_ops = &mac_ops_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index fa8d4f4..47afed7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
common_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
bucket_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
sig_hash ^= lo_hash_dword << (16 - n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
common_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
bucket_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
@@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
bucket_hash ^= lo_hash_dword >> n; \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
bucket_hash ^= hi_hash_dword >> n; \
} while (0)
@@ -1633,6 +1633,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (hw->mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
@@ -2181,7 +2182,7 @@ release_i2c_access:
return status;
}
-static struct ixgbe_mac_operations mac_ops_82599 = {
+static const struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
.start_hw = &ixgbe_start_hw_82599,
@@ -2220,6 +2221,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
@@ -2227,6 +2229,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
+ .init_swfw_sync = NULL,
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
.prot_autoc_read = &prot_autoc_read_82599,
@@ -2235,7 +2238,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eeprom_82599,
.read_buffer = &ixgbe_read_eeprom_buffer_82599,
@@ -2246,7 +2249,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
-static struct ixgbe_phy_operations phy_ops_82599 = {
+static const struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
.identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82599,
@@ -2263,7 +2266,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.check_overtemp = &ixgbe_tn_check_overtemp,
};
-struct ixgbe_info ixgbe_82599_info = {
+const struct ixgbe_info ixgbe_82599_info = {
.mac = ixgbe_mac_82599EB,
.get_invariants = &ixgbe_get_invariants_82599,
.mac_ops = &mac_ops_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 6404505..902d206 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -97,6 +97,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = true;
break;
@@ -111,12 +112,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_fc - Set up flow control
+ * ixgbe_setup_fc_generic - Set up flow control
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
@@ -296,7 +297,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ret_val = ixgbe_setup_fc(hw);
+ ret_val = hw->mac.ops.setup_fc(hw);
if (ret_val)
return ret_val;
@@ -681,6 +682,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
struct ixgbe_bus_info *bus = &hw->bus;
+ u16 ee_ctrl_4;
u32 reg;
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
@@ -691,6 +693,13 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
+
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ }
}
/**
@@ -816,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -1493,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
* Mask is used to shift "count" bits of "data" out to the EEPROM
* one bit at a time. Determine the starting bit based on count
*/
- mask = 0x01 << (count - 1);
+ mask = BIT(count - 1);
for (i = 0; i < count; i++) {
/*
@@ -1982,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
*/
vector_reg = (vector >> 5) & 0x7F;
vector_bit = vector & 0x1F;
- hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+ hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
}
/**
@@ -2854,6 +2863,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
@@ -2911,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
mpsar_hi = 0;
}
} else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
+ mpsar_lo &= ~BIT(vmdq);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
} else {
- mpsar_hi &= ~(1 << (vmdq - 32));
+ mpsar_hi &= ~BIT(vmdq - 32);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
}
@@ -2943,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
if (vmdq < 32) {
mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
+ mpsar |= BIT(vmdq);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
} else {
mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
+ mpsar |= BIT(vmdq - 32);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
}
return 0;
@@ -2968,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
u32 rar = hw->mac.san_mac_rar_index;
if (vmdq < 32) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
} else {
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
}
return 0;
@@ -3072,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
- vfta_delta = 1 << (vlan % 32);
+ vfta_delta = BIT(vlan % 32);
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
/* vfta_delta represents the difference between the current value
@@ -3103,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
/* set the pool bit */
- bits |= 1 << (vind % 32);
+ bits |= BIT(vind % 32);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
- bits ^= 1 << (vind % 32);
+ bits ^= BIT(vind % 32);
if (!bits &&
!IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
@@ -3300,43 +3310,25 @@ wwn_prefix_err:
/**
* ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
* @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
*
**/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
- int j;
- int pf_target_reg = pf >> 3;
- int pf_target_shift = pf % 8;
- u32 pfvfspoof = 0;
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
- /*
- * PFVFSPOOF register array is size 8 with 8 bits assigned to
- * MAC anti-spoof enables in each register array element.
- */
- for (j = 0; j < pf_target_reg; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Do not set the bits assigned to the PF
- */
- pfvfspoof &= (1 << pf_target_shift) - 1;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * Remaining pools belong to the PF so they do not need to have
- * anti-spoofing enabled.
- */
- for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+ pfvfspoof |= BIT(vf_target_shift);
+ else
+ pfvfspoof &= ~BIT(vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
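[Annotation] The rewrite converts a PF-wide blanket write into a per-VF read-modify-write; each PFVFSPOOF register carries the MAC anti-spoof enables for 8 pools. Worked indexing example:

	/* for vf = 13:
	 *   vf_target_reg   = 13 >> 3 = 1
	 *   vf_target_shift = 13 % 8  = 5
	 * so only BIT(5) of PFVFSPOOF(1) is set or cleared, leaving the
	 * other pools' settings untouched.
	 */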
/**
@@ -3357,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof |= (1 << vf_target_shift);
+ pfvfspoof |= BIT(vf_target_shift);
else
- pfvfspoof &= ~(1 << vf_target_shift);
+ pfvfspoof &= ~BIT(vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
@@ -3483,18 +3475,27 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
- u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u32 hicr, i, bi, fwsts;
u16 buf_len, dword_len;
+ union {
+ struct ixgbe_hic_hdr hdr;
+ u32 u32arr[1];
+ } *bp = buffer;
+ s32 status;
- if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ if (status)
+ return status;
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3502,26 +3503,27 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if ((hicr & IXGBE_HICR_EN) == 0) {
+ if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
+ if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto rel_out;
}
dword_len = length >> 2;
- /*
- * The device driver writes the relevant command block
+ /* The device driver writes the relevant command block
* into the ram area.
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, cpu_to_le32(buffer[i]));
+ i, cpu_to_le32(bp->u32arr[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3534,44 +3536,49 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
}
/* Check command successful completion. */
- if ((timeout != 0 && i == timeout) ||
- (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ if ((timeout && i == timeout) ||
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
hw_dbg(hw, "Command has failed with no status valid.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
if (!return_data)
- return 0;
+ goto rel_out;
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&buffer[bi]);
+ bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&bp->u32arr[bi]);
}
/* If there is anything in the data position, pull it in */
- buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
- if (buf_len == 0)
- return 0;
+ buf_len = bp->hdr.buf_len;
+ if (!buf_len)
+ goto rel_out;
- if (length < (buf_len + hdr_size)) {
+ if (length < round_up(buf_len, 4) + hdr_size) {
hw_dbg(hw, "Buffer not large enough for reply message.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs, add 3 for odd lengths */
dword_len = (buf_len + 3) >> 2;
- /* Pull in the rest of the buffer (bi is where we left off)*/
+ /* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&buffer[bi]);
+ bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&bp->u32arr[bi]);
}
- return 0;
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ return status;
}
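[Annotation] Two behavioral fixes ride along with the restyling: the reply-length check now rounds buf_len up to a dword (the reads above are dword-granular), and the SW/FW semaphore moved into this function, so every exit funnels through rel_out. The error-handling skeleton (a sketch; do_hic_work() and read_hic_reply() are hypothetical stand-ins):

	static s32 example_locked_op(struct ixgbe_hw *hw)
	{
		s32 status;

		status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
		if (status)
			return status;	/* nothing held yet, plain return */

		status = do_hic_work(hw);
		if (status)
			goto rel_out;	/* semaphore held: release on all paths */

		status = read_hic_reply(hw);

	rel_out:
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
		return status;
	}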
/**
@@ -3594,13 +3601,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
int i;
s32 ret_val;
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
- return IXGBE_ERR_SWFW_SYNC;
-
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.port_num = hw->bus.func;
fw_cmd.ver_maj = maj;
fw_cmd.ver_min = min;
fw_cmd.ver_build = build;
@@ -3612,7 +3616,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.pad2 = 0;
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
sizeof(fw_cmd),
IXGBE_HI_COMMAND_TIMEOUT,
true);
@@ -3628,7 +3632,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
break;
}
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b95631..6d4c260 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -81,6 +81,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
@@ -105,13 +106,13 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length, u32 timeout, bool return_data);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+ u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 02c7333..072ef3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
if (tc_config[tc].dcb_pfc != pfc_disabled)
- *pfc_en |= 1 << tc;
+ *pfc_en |= BIT(tc);
}
}
@@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
{
struct tc_configuration *tc_config = &cfg->tc_config[0];
- u8 prio_mask = 1 << up;
+ u8 prio_mask = BIT(up);
u8 tc = cfg->num_tcs.pg_tcs;
/* If tc is 0 then DCB is likely not enabled or supported */
@@ -293,6 +293,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
default:
@@ -311,6 +312,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
default:
break;
@@ -368,6 +370,7 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -398,6 +401,7 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_dcb_read_rtrup2tc_82599(hw, map);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index d3ba63f..b79e93a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (!(pfc_en & (1 << i))) {
+ if (!(pfc_en & BIT(i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index b5cc989..1011d64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
int enabled = 0;
for (j = 0; j < MAX_USER_PRIORITY; j++) {
- if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+ if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
enabled = 1;
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 2707bda..b8fc3cf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
};
u8 up = dcb_getapp(adapter->netdev, &app);
- if (up && !(up & (1 << adapter->fcoe.up)))
+ if (up && !(up & BIT(adapter->fcoe.up)))
changes |= BIT_APP_UPCHG;
#endif
@@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
- if (app_mask & (1 << adapter->fcoe.up))
+ if (app_mask & BIT(adapter->fcoe.up))
return 0;
adapter->fcoe.up = app->priority;
@@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
- if (app_mask & (1 << adapter->fcoe.up))
+ if (app_mask & BIT(adapter->fcoe.up))
return 0;
adapter->fcoe.up = app_mask ?
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index b3530e1..59b771b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* Flow Control */
regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
- regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
- regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
- regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
- regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+ for (i = 0; i < 4; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
for (i = 0; i < 8; i++) {
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -547,6 +545,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -660,6 +659,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
for (i = 0; i < 8; i++)
@@ -718,8 +718,10 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
- regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
- regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+ regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
+ regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
+ regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
+ regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
for (i = 0; i < 8; i++)
regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
@@ -729,7 +731,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
- regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+ regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
+ regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
@@ -801,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
- regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
- regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
- regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
- regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+ for (i = 0; i < 4; i++)
+ regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
- regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
- regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
- regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
- regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+ for (i = 0; i < 4; i++)
+ regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
for (i = 0; i < 8; i++)
regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
@@ -1443,6 +1442,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1583,7 +1583,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 10; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (!shared_int) {
/*
@@ -1681,6 +1681,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1720,6 +1721,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1780,6 +1782,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2991,6 +2994,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -3007,14 +3011,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
info->phc_index = -1;
info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
return ethtool_op_get_ts_info(dev, info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index e771e76..bcdc884 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -128,6 +128,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7df3fe2..9f3677c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -53,15 +53,7 @@
#include <net/vxlan.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-
-#ifdef CONFIG_OF
-#include <linux/of_net.h>
-#endif
-
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
+#include <net/tc_act/tc_mirred.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -79,10 +71,10 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.2.1-k"
+#define DRV_VERSION "4.4.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2015 Intel Corporation.";
+ "Copyright (c) 1999-2016 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
@@ -92,6 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X540] = &ixgbe_X540_info,
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
+ [board_x550em_a] = &ixgbe_x550em_a_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -134,10 +127,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
/* required last entry */
{0, }
};
@@ -372,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
if (ixgbe_removed(reg_addr))
return IXGBE_FAILED_READ_REG;
+ if (unlikely(hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+ struct ixgbe_adapter *adapter;
+ int i;
+
+ for (i = 0; i < 200; ++i) {
+ value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+ if (likely(!value))
+ goto writes_completed;
+ if (value == IXGBE_FAILED_READ_REG) {
+ ixgbe_remove_adapter(hw);
+ return IXGBE_FAILED_READ_REG;
+ }
+ udelay(5);
+ }
+
+ adapter = hw->back;
+ e_warn(hw, "register writes incomplete %08x\n", value);
+ }
+
+writes_completed:
value = readl(reg_addr + reg);
if (unlikely(value == IXGBE_FAILED_READ_REG))
ixgbe_check_remove(hw, reg);
@@ -588,7 +609,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_info("%-15s %016lX %016lX %016lX\n",
netdev->name,
netdev->state,
- netdev->trans_start,
+ dev_trans_start(netdev),
netdev->last_rx);
}
@@ -869,6 +890,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -907,6 +929,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1087,9 +1110,40 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to update
+ * @maxrate: maximum transmit bitrate to enforce, in Mb/s
+ **/
+static int ixgbe_tx_maxrate(struct net_device *netdev,
+ int queue_index, u32 maxrate)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 bcnrc_val = ixgbe_link_mbps(adapter);
+
+ if (!maxrate)
+ return 0;
+
+ /* Calculate the rate factor values to set */
+ bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+ bcnrc_val /= maxrate;
+
+ /* clear everything but the rate factor */
+ bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+ IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+ /* enable the rate scheduler */
+ bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+
+ return 0;
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget)
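
The ixgbe_tx_maxrate() callback added in the hunk above programs RTTBCNRC with a 10.14 fixed-point rate factor of link_speed / maxrate. A standalone sketch of that arithmetic; the shift, mask, and enable constants are assumptions modelled on ixgbe_type.h, not part of this diff:

#include <stdint.h>
#include <stdio.h>

/* Register layout assumed from ixgbe_type.h: 10.14 fixed point */
#define RF_INT_SHIFT	14
#define RF_INT_MASK	(0x3ffu << RF_INT_SHIFT)
#define RF_DEC_MASK	0x3fffu
#define RS_ENA		0x80000000u

static uint32_t rate_factor(uint32_t link_mbps, uint32_t maxrate_mbps)
{
	uint32_t v = (link_mbps << RF_INT_SHIFT) / maxrate_mbps;

	v &= RF_INT_MASK | RF_DEC_MASK;	/* keep only the 10.14 factor */
	return v | RS_ENA;		/* enable the rate scheduler */
}

int main(void)
{
	/* 10 Gb/s link capped to 2.5 Gb/s: factor 4.0, i.e. 4 << 14 */
	printf("RTTBCNRC = 0x%08x\n", rate_factor(10000, 2500));
	return 0;
}
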
@@ -2192,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* Populate MSIX to EITR Select */
if (adapter->num_vfs > 32) {
- u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
}
@@ -2222,6 +2276,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2333,6 +2388,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2494,6 +2550,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
return false;
case ixgbe_mac_82599EB:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
case ixgbe_media_type_fiber_qsfp:
@@ -2568,6 +2625,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2596,6 +2654,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2631,6 +2690,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask |= IXGBE_EIMS_TS;
break;
default:
@@ -2646,7 +2706,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ case ixgbe_mac_x550em_a:
+ if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
+ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
mask |= IXGBE_EICR_GPI_SDP0_X540;
@@ -2704,6 +2767,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -2786,8 +2850,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- ixgbe_for_each_ring(ring, q_vector->tx)
- clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
+ ixgbe_for_each_ring(ring, q_vector->tx) {
+ if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
/* Exit if we are called by netpoll or busy polling is active */
if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -2805,7 +2871,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget);
work_done += cleaned;
- clean_complete &= (cleaned < per_ring_budget);
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
}
ixgbe_qv_unlock_napi(q_vector);
@@ -2818,7 +2885,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
return 0;
}
@@ -2937,6 +3004,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -3033,6 +3101,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3109,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
* currently 40.
*/
if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
- txdctl |= (1 << 16); /* WTHRESH = 1 */
+ txdctl |= 1u << 16; /* WTHRESH = 1 */
else
- txdctl |= (8 << 16); /* WTHRESH = 8 */
+ txdctl |= 8u << 16; /* WTHRESH = 8 */
/*
* Setting PTHRESH to 32 both improves performance
* and avoids a TX hang with DFP enabled
*/
- txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ txdctl |= (1u << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
@@ -3669,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
return;
if (rss_i > 3)
- psrtype |= 2 << 29;
+ psrtype |= 2u << 29;
else if (rss_i > 1)
- psrtype |= 1 << 29;
+ psrtype |= 1u << 29;
for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3698,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
if (adapter->bridge_mode == BRIDGE_MODE_VEB)
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
@@ -3729,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-
- /* Enable MAC Anti-Spoofing */
- hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
- adapter->num_vfs);
-
- /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
- * calling set_ethertype_anti_spoofing for each VF in loop below
- */
- if (hw->mac.ops.set_ethertype_anti_spoofing) {
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
- (IXGBE_ETQF_FILTER_EN |
- IXGBE_ETQF_TX_ANTISPOOF |
- IXGBE_ETH_P_LLDP));
-
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
- (IXGBE_ETQF_FILTER_EN |
- IXGBE_ETQF_TX_ANTISPOOF |
- ETH_P_PAUSE));
- }
-
- /* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
- if (!adapter->vfinfo[i].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
- /* enable ethertype anti spoofing if hw supports it */
- if (hw->mac.ops.set_ethertype_anti_spoofing)
- hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+ /* configure spoof checking */
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+ adapter->vfinfo[i].spoofchk_enabled);
/* Enable/Disable RSS query feature */
ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3832,6 +3877,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
/* fall through for older HW */
@@ -3908,7 +3954,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
+ if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -3947,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
* entry other than the PF.
*/
word = idx * 2 + (VMDQ_P(0) / 32);
- bits = ~(1 << (VMDQ_P(0)) % 32);
+ bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* Disable the filter so this falls into the default pool. */
@@ -3965,9 +4013,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
- if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
- ixgbe_update_pf_promisc_vlvf(adapter, vid);
- else
+ if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
clear_bit(vid, adapter->active_vlans);
@@ -3995,6 +4041,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4031,6 +4078,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4057,6 +4105,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
break;
@@ -4081,7 +4130,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
- vlvfb |= 1 << (VMDQ_P(0) % 32);
+ vlvfb |= BIT(VMDQ_P(0) % 32);
IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
}
@@ -4111,7 +4160,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
if (vlvf) {
/* record VLAN ID in VFTA */
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
@@ -4120,7 +4169,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
/* remove PF from the pool */
word = i * 2 + VMDQ_P(0) / 32;
- bits = ~(1 << (VMDQ_P(0) % 32));
+ bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
}
@@ -4147,6 +4196,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
break;
@@ -4172,11 +4222,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
- u16 vid;
+ u16 vid = 1;
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
@@ -4426,6 +4476,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ netdev_features_t features = netdev->features;
int count;
/* Check for Promiscuous and All Multicast modes */
@@ -4443,14 +4494,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= IXGBE_VMOLR_MPE;
- ixgbe_vlan_promisc_enable(adapter);
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
hw->addr_ctrl.user_set_promisc = false;
- ixgbe_vlan_promisc_disable(adapter);
}
/*
@@ -4483,7 +4533,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
}
/* This is useful for sniffing bad packets. */
- if (adapter->netdev->features & NETIF_F_RXALL) {
+ if (features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
	 * in ixgbe_set_rx_mode */
fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
@@ -4496,10 +4546,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter);
else
ixgbe_vlan_strip_disable(adapter);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ixgbe_vlan_promisc_disable(adapter);
+ else
+ ixgbe_vlan_promisc_enable(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -4530,6 +4585,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
adapter->vxlan_port = 0;
break;
@@ -4630,6 +4686,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -4690,6 +4747,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -4805,9 +4863,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
return;
if (rss_i > 3)
- psrtype |= 2 << 29;
+ psrtype |= 2u << 29;
else if (rss_i > 1)
- psrtype |= 1 << 29;
+ psrtype |= 1u << 29;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4871,7 +4929,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
/* shutdown specific queue receive and wait for dma to settle */
ixgbe_disable_rx_queue(adapter, rx_ring);
usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+ ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
ixgbe_clean_rx_ring(rx_ring);
rx_ring->l2_accel_priv = NULL;
}
@@ -5106,6 +5164,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5156,6 +5215,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
gpie |= IXGBE_SDP0_GPIEN_X540;
break;
default:
@@ -5228,7 +5288,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
- adapter->netdev->trans_start = jiffies;
+ netif_trans_update(adapter->netdev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -5467,6 +5527,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -5498,6 +5559,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
ixgbe_tx_timeout_reset(adapter);
}
+#ifdef CONFIG_IXGBE_DCB
+static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct tc_configuration *tc;
+ int j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ case ixgbe_mac_82599EB:
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ default:
+ adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
+ break;
+ }
+
+ /* Configure DCB traffic classes */
+ for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+ tc = &adapter->dcb_cfg.tc_config[j];
+ tc->path[DCB_TX_CONFIG].bwg_id = 0;
+ tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->path[DCB_RX_CONFIG].bwg_id = 0;
+ tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->dcb_pfc = pfc_disabled;
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &adapter->dcb_cfg.tc_config[0];
+ tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+ adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+ adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_set_bitmap = 0x00;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+ memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ sizeof(adapter->temp_dcb_cfg));
+}
+#endif
+
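
One non-obvious constant in ixgbe_init_dcb() above: bwg_percent is set to 12 + (j & 1), alternating 12 and 13 so that the eight traffic classes sum to exactly 100 percent. A trivial check of that arithmetic:

#include <assert.h>

int main(void)
{
	int j, sum = 0;

	for (j = 0; j < 8; j++)		/* MAX_TRAFFIC_CLASS */
		sum += 12 + (j & 1);	/* 12, 13, 12, 13, ... */
	assert(sum == 100);		/* 4 * 12 + 4 * 13 */
	return 0;
}
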
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
@@ -5512,10 +5625,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
unsigned int rss, fdir;
u32 fwsm;
-#ifdef CONFIG_IXGBE_DCB
- int j;
- struct tc_configuration *tc;
-#endif
+ u16 device_caps;
+ int i;
/* PCI config space info */
@@ -5537,6 +5648,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
+#ifdef CONFIG_IXGBE_DCB
+ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+#endif
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5547,7 +5662,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/* initialize static ixgbe jump table entries */
- adapter->jump_tables[0] = ixgbe_ipv4_fields;
+ adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
+ GFP_KERNEL);
+ if (!adapter->jump_tables[0])
+ return -ENOMEM;
+ adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
+
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
+ adapter->jump_tables[i] = NULL;
adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
hw->mac.num_rar_entries,
@@ -5585,6 +5707,17 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+#ifdef CONFIG_IXGBE_DCB
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+#ifdef CONFIG_IXGBE_DCB
+ adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+ /* Fall Through */
case ixgbe_mac_X550:
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
@@ -5606,42 +5739,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
spin_lock_init(&adapter->fdir_perfect_lock);
#ifdef CONFIG_IXGBE_DCB
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
- break;
- default:
- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
- break;
- }
-
- /* Configure DCB traffic classes */
- for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
- tc = &adapter->dcb_cfg.tc_config[j];
- tc->path[DCB_TX_CONFIG].bwg_id = 0;
- tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->path[DCB_RX_CONFIG].bwg_id = 0;
- tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->dcb_pfc = pfc_disabled;
- }
-
- /* Initialize default user to priority mapping, UPx->TC0 */
- tc = &adapter->dcb_cfg.tc_config[0];
- tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
- tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-
- adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
- adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
- adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_set_bitmap = 0x00;
- adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
- memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- sizeof(adapter->temp_dcb_cfg));
-
+ ixgbe_init_dcb(adapter);
#endif
/* default flow control settings */
@@ -5675,6 +5773,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+ /* Cache bit indicating need for crosstalk fix */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ adapter->need_crosstalk_fix = false;
+ else
+ adapter->need_crosstalk_fix = true;
+ break;
+ default:
+ adapter->need_crosstalk_fix = false;
+ break;
+ }
+
/* set default work limits */
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
@@ -6217,6 +6331,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -6352,6 +6467,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -6367,7 +6483,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
if ((hw->mac.type == ixgbe_mac_82599EB) ||
(hw->mac.type == ixgbe_mac_X540) ||
(hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x)) {
+ (hw->mac.type == ixgbe_mac_X550EM_x) ||
+ (hw->mac.type == ixgbe_mac_x550em_a)) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -6392,6 +6509,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -6562,7 +6680,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
- eics |= ((u64)1 << i);
+ eics |= BIT_ULL(i);
}
}
@@ -6593,6 +6711,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
link_up = true;
}
+	/* If the crosstalk fix is enabled, sanity check that the SFP+
+	 * cage is not empty before believing a link-up indication.
+	 */
+ if (adapter->need_crosstalk_fix) {
+ u32 sfp_cage_full;
+
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
+ link_up = false;
+ }
+
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
@@ -6662,6 +6792,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6938,6 +7069,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
s32 err;
+ /* If crosstalk fix enabled verify the SFP+ cage is full */
+ if (adapter->need_crosstalk_fix) {
+ u32 sfp_cage_full;
+
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ if (!sfp_cage_full)
+ return;
+ }
+
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
@@ -7122,10 +7263,12 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
#ifdef CONFIG_IXGBE_VXLAN
+ rtnl_lock();
if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
vxlan_get_rx_port(adapter->netdev);
}
+ rtnl_unlock();
#endif /* CONFIG_IXGBE_VXLAN */
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
@@ -7148,9 +7291,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7163,46 +7315,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 0 as index for TSO */
- mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
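
The rewritten TSO path above no longer recomputes a pseudo-header checksum from the IP addresses. The IPv4 header checksum is folded together from lco_csum() and the existing TCP checksum, and the payload length is then cancelled out of the TCP checksum via csum_replace_by_diff(). A self-contained sketch of that ones'-complement cancellation, with fold/subtract helpers written out locally rather than taken from the kernel:

#include <assert.h>
#include <stdint.h>

/* Fold a 32-bit ones'-complement sum down to 16 bits */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Subtracting in ones'-complement is adding the complement */
static uint16_t csum_sub(uint16_t csum, uint32_t value)
{
	uint32_t sum = (uint16_t)~csum;		/* back to a raw sum */

	sum += (uint16_t)~csum_fold(value);
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	/* checksum with payload length 1000 included ... */
	uint16_t with_len = (uint16_t)~csum_fold(0x1234 + 1000);

	/* ... matches one computed with the length removed */
	assert(csum_sub(with_len, 1000) == (uint16_t)~csum_fold(0x1234));
	return 0;
}
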
@@ -7211,103 +7369,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 1;
}
+static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
- !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+csum_failed:
+ if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
+ IXGBE_TX_FLAGS_CC)))
return;
- vlan_macip_lens = skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- } else {
- u8 l4_hdr = 0;
- union {
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- u8 *raw;
- } network_hdr;
- union {
- struct tcphdr *tcphdr;
- u8 *raw;
- } transport_hdr;
- __be16 frag_off;
-
- if (skb->encapsulation) {
- network_hdr.raw = skb_inner_network_header(skb);
- transport_hdr.raw = skb_inner_transport_header(skb);
- vlan_macip_lens = skb_inner_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- } else {
- network_hdr.raw = skb_network_header(skb);
- transport_hdr.raw = skb_transport_header(skb);
- vlan_macip_lens = skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- }
-
- /* use first 4 bits to determine IP version */
- switch (network_hdr.ipv4->version) {
- case IPVERSION:
- vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
- type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = network_hdr.ipv4->protocol;
- break;
- case 6:
- vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
- l4_hdr = network_hdr.ipv6->nexthdr;
- if (likely((transport_hdr.raw - network_hdr.raw) ==
- sizeof(struct ipv6hdr)))
- break;
- ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- l4_hdr = NEXTHDR_FRAGMENT;
- break;
- default:
- break;
- }
+ goto no_csum;
+ }
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ ixgbe_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum, version=%d, l4 proto=%x\n",
- network_hdr.ipv4->version, l4_hdr);
- }
- skb_checksum_help(skb);
- goto no_csum;
}
-
- /* update TX checksum flag */
- first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto csum_failed;
}
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
- type_tucmd, mss_l4len_idx);
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
}
#define IXGBE_SET_FLAG(_input, _flag, _result) \
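
ixgbe_tx_csum() above now infers the L4 protocol from skb->csum_offset alone, since a CHECKSUM_PARTIAL skb already records where its checksum field sits; only SCTP gets extra validation against the actual protocol field. A toy illustration of the offsetof() dispatch, using abbreviated stand-in structs rather than the kernel headers:

#include <stddef.h>
#include <stdio.h>

/* abbreviated header layouts; checksum fields at their wire offsets */
struct tcphdr  { unsigned char skip[16]; unsigned short check; };
struct udphdr  { unsigned char skip[6];  unsigned short check; };
struct sctphdr { unsigned char skip[8];  unsigned int checksum; };

static const char *l4_from_csum_offset(size_t csum_offset)
{
	switch (csum_offset) {
	case offsetof(struct tcphdr, check):	  /* 16 */
		return "TCP";
	case offsetof(struct udphdr, check):	  /* 6 */
		return "UDP";
	case offsetof(struct sctphdr, checksum):  /* 8 */
		return "SCTP (still needs protocol validation)";
	default:
		return "unknown: fall back to skb_checksum_help()";
	}
}

int main(void)
{
	printf("%s\n", l4_from_csum_offset(16));
	return 0;
}
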
@@ -8238,6 +8354,134 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
return 0;
}
+#ifdef CONFIG_NET_CLS_ACT
+static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
+ u8 *queue, u64 *action)
+{
+ unsigned int num_vfs = adapter->num_vfs, vf;
+ struct net_device *upper;
+ struct list_head *iter;
+
+	/* redirect to an SR-IOV VF */
+ for (vf = 0; vf < num_vfs; ++vf) {
+ upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
+ if (upper->ifindex == ifindex) {
+ if (adapter->num_rx_pools > 1)
+ *queue = vf * 2;
+ else
+ *queue = vf * adapter->num_rx_queues_per_pool;
+
+ *action = vf + 1;
+ *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ return 0;
+ }
+ }
+
+	/* redirect to an offloaded macvlan netdev */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+ if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+ *action = *queue;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+ struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+ const struct tc_action *a;
+ int err;
+
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ tc_for_each_action(a, exts) {
+
+ /* Drop action */
+ if (is_tcf_gact_shot(a)) {
+ *action = IXGBE_FDIR_DROP_QUEUE;
+ *queue = IXGBE_FDIR_DROP_QUEUE;
+ return 0;
+ }
+
+		/* Redirect to a VF or an offloaded macvlan */
+ if (is_tcf_mirred_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+
+ err = handle_redirect_action(adapter, ifindex, queue,
+ action);
+ if (err == 0)
+ return err;
+ }
+ }
+
+ return -EINVAL;
+}
+#else
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+ struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NET_CLS_ACT */
+
+static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
+ union ixgbe_atr_input *mask,
+ struct tc_cls_u32_offload *cls,
+ struct ixgbe_mat_field *field_ptr,
+ struct ixgbe_nexthdr *nexthdr)
+{
+ int i, j, off;
+ __be32 val, m;
+ bool found_entry = false, found_jump_field = false;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ off = cls->knode.sel->keys[i].off;
+ val = cls->knode.sel->keys[i].val;
+ m = cls->knode.sel->keys[i].mask;
+
+ for (j = 0; field_ptr[j].val; j++) {
+ if (field_ptr[j].off == off) {
+ field_ptr[j].val(input, mask, val, m);
+ input->filter.formatted.flow_type |=
+ field_ptr[j].type;
+ found_entry = true;
+ break;
+ }
+ }
+ if (nexthdr) {
+ if (nexthdr->off == cls->knode.sel->keys[i].off &&
+ nexthdr->val == cls->knode.sel->keys[i].val &&
+ nexthdr->mask == cls->knode.sel->keys[i].mask)
+ found_jump_field = true;
+ else
+ continue;
+ }
+ }
+
+ if (nexthdr && !found_jump_field)
+ return -EINVAL;
+
+ if (!found_entry)
+ return 0;
+
+ mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ return 0;
+}
+
static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
__be16 protocol,
struct tc_cls_u32_offload *cls)
@@ -8245,16 +8489,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
u32 loc = cls->knode.handle & 0xfffff;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_mat_field *field_ptr;
- struct ixgbe_fdir_filter *input;
- union ixgbe_atr_input mask;
-#ifdef CONFIG_NET_CLS_ACT
- const struct tc_action *a;
-#endif
- int i, err = 0;
+ struct ixgbe_fdir_filter *input = NULL;
+ union ixgbe_atr_input *mask = NULL;
+ struct ixgbe_jump_table *jump = NULL;
+ int i, err = -EINVAL;
u8 queue;
u32 uhtid, link_uhtid;
- memset(&mask, 0, sizeof(union ixgbe_atr_input));
uhtid = TC_U32_USERHTID(cls->knode.handle);
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
@@ -8266,38 +8507,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* headers when needed.
*/
if (protocol != htons(ETH_P_IP))
- return -EINVAL;
-
- if (link_uhtid) {
- struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
-
- if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- if (!test_bit(link_uhtid - 1, &adapter->tables))
- return -EINVAL;
-
- for (i = 0; nexthdr[i].jump; i++) {
- if (nexthdr->o != cls->knode.sel->offoff ||
- nexthdr->s != cls->knode.sel->offshift ||
- nexthdr->m != cls->knode.sel->offmask ||
- /* do not support multiple key jumps its just mad */
- cls->knode.sel->nkeys > 1)
- return -EINVAL;
-
- if (nexthdr->off != cls->knode.sel->keys[0].off ||
- nexthdr->val != cls->knode.sel->keys[0].val ||
- nexthdr->mask != cls->knode.sel->keys[0].mask)
- return -EINVAL;
-
- adapter->jump_tables[link_uhtid] = nexthdr->jump;
- }
- return 0;
- }
+ return err;
if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
e_err(drv, "Location out of range\n");
- return -EINVAL;
+ return err;
}
/* cls u32 is a graph starting at root node 0x800. The driver tracks
@@ -8308,87 +8522,123 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* this function _should_ be generic try not to hardcode values here.
*/
if (uhtid == 0x800) {
- field_ptr = adapter->jump_tables[0];
+ field_ptr = (adapter->jump_tables[0])->mat;
} else {
if (uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- field_ptr = adapter->jump_tables[uhtid];
+ return err;
+ if (!adapter->jump_tables[uhtid])
+ return err;
+ field_ptr = (adapter->jump_tables[uhtid])->mat;
}
if (!field_ptr)
- return -EINVAL;
+ return err;
- input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!input)
- return -ENOMEM;
+	/* At this point we know field_ptr is valid; we either build a
+	 * cls_u32 link or attach a filter. Adding a link to a handle that
+	 * does not exist is invalid, and the same goes for adding rules
+	 * to handles that don't exist.
+	 */
- for (i = 0; i < cls->knode.sel->nkeys; i++) {
- int off = cls->knode.sel->keys[i].off;
- __be32 val = cls->knode.sel->keys[i].val;
- __be32 m = cls->knode.sel->keys[i].mask;
- bool found_entry = false;
- int j;
+ if (link_uhtid) {
+ struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
- for (j = 0; field_ptr[j].val; j++) {
- if (field_ptr[j].off == off) {
- field_ptr[j].val(input, &mask, val, m);
- input->filter.formatted.flow_type |=
- field_ptr[j].type;
- found_entry = true;
+ if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return err;
+
+ if (!test_bit(link_uhtid - 1, &adapter->tables))
+ return err;
+
+ for (i = 0; nexthdr[i].jump; i++) {
+ if (nexthdr[i].o != cls->knode.sel->offoff ||
+ nexthdr[i].s != cls->knode.sel->offshift ||
+ nexthdr[i].m != cls->knode.sel->offmask)
+ return err;
+
+ jump = kzalloc(sizeof(*jump), GFP_KERNEL);
+ if (!jump)
+ return -ENOMEM;
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input) {
+ err = -ENOMEM;
+ goto free_jump;
+ }
+ mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask) {
+ err = -ENOMEM;
+ goto free_input;
+ }
+ jump->input = input;
+ jump->mask = mask;
+ err = ixgbe_clsu32_build_input(input, mask, cls,
+ field_ptr, &nexthdr[i]);
+ if (!err) {
+ jump->mat = nexthdr[i].jump;
+ adapter->jump_tables[link_uhtid] = jump;
break;
}
}
-
- if (!found_entry)
- goto err_out;
+ return 0;
}
- mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
- IXGBE_ATR_L4TYPE_MASK;
-
- if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
- mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+ mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask) {
+ err = -ENOMEM;
+ goto free_input;
+ }
-#ifdef CONFIG_NET_CLS_ACT
- if (list_empty(&cls->knode.exts->actions))
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
+ if ((adapter->jump_tables[uhtid])->input)
+ memcpy(input, (adapter->jump_tables[uhtid])->input,
+ sizeof(*input));
+ if ((adapter->jump_tables[uhtid])->mask)
+ memcpy(mask, (adapter->jump_tables[uhtid])->mask,
+ sizeof(*mask));
+ }
+ err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
+ if (err)
goto err_out;
- list_for_each_entry(a, &cls->knode.exts->actions, list) {
- if (!is_tcf_gact_shot(a))
- goto err_out;
- }
-#endif
+ err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
+ &queue);
+ if (err < 0)
+ goto err_out;
- input->action = IXGBE_FDIR_DROP_QUEUE;
- queue = IXGBE_FDIR_DROP_QUEUE;
input->sw_idx = loc;
spin_lock(&adapter->fdir_perfect_lock);
if (hlist_empty(&adapter->fdir_filter_list)) {
- memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
- err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, mask);
if (err)
goto err_out_w_lock;
- } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
err = -EINVAL;
goto err_out_w_lock;
}
- ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
input->sw_idx, queue);
if (!err)
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
+ kfree(mask);
return err;
err_out_w_lock:
spin_unlock(&adapter->fdir_perfect_lock);
err_out:
+ kfree(mask);
+free_input:
kfree(input);
- return -EINVAL;
+free_jump:
+ kfree(jump);
+ return err;
}
static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
@@ -8515,11 +8765,6 @@ static int ixgbe_set_features(struct net_device *netdev,
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
}
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- ixgbe_vlan_strip_enable(adapter);
- else
- ixgbe_vlan_strip_disable(adapter);
-
if (changed & NETIF_F_RXALL)
need_reset = true;
@@ -8536,6 +8781,9 @@ static int ixgbe_set_features(struct net_device *netdev,
if (need_reset)
ixgbe_do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER))
+ ixgbe_set_rx_mode(netdev);
return 0;
}
@@ -8833,17 +9081,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
kfree(fwd_adapter);
}
-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN 127
+#define IXGBE_MAX_NETWORK_HDR_LEN 511
+
static netdev_features_t
ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
- if (!skb->encapsulation)
- return features;
-
- if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
- IXGBE_MAX_TUNNEL_HDR_LEN))
- return features & ~NETIF_F_CSUM_MASK;
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
return features;
}
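
The 127 and 511 ceilings above reflect the Tx context descriptor: per this patch's reasoning, the MAC header length is a 7-bit field and the network header length a 9-bit one, so offloads that depend on describing those headers must be dropped for anything larger. A reduced model of the clamping:

#include <stdint.h>
#include <stdio.h>

/* toy feature bits standing in for the NETIF_F_* flags */
#define F_CSUM	0x1u
#define F_TSO	0x2u
#define F_VLAN	0x4u

static uint32_t clamp_offloads(unsigned int mac_hdr_len,
			       unsigned int net_hdr_len, uint32_t features)
{
	if (mac_hdr_len > 127)		/* exceeds 7-bit MACLEN field */
		return features & ~(F_CSUM | F_TSO | F_VLAN);
	if (net_hdr_len > 511)		/* exceeds 9-bit header field */
		return features & ~(F_CSUM | F_TSO);
	return features;
}

int main(void)
{
	/* stacked encapsulations can push MACLEN past 127: offloads drop */
	printf("0x%x\n", clamp_offloads(130, 20, F_CSUM | F_TSO | F_VLAN));
	return 0;
}
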
@@ -8858,6 +9125,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_mac_address = ixgbe_set_mac,
.ndo_change_mtu = ixgbe_change_mtu,
.ndo_tx_timeout = ixgbe_tx_timeout,
+ .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
.ndo_do_ioctl = ixgbe_ioctl,
@@ -8943,7 +9211,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
/**
* ixgbe_wol_supported - Check whether device supports WoL
- * @hw: hw specific details
+ * @adapter: the adapter private structure
* @device_id: the device ID
* @subdev_id: the subsystem device ID
*
@@ -8951,19 +9219,33 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
* which devices have WoL support
*
**/
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
- u16 subdevice_id)
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+ u16 subdevice_id)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
- int is_wol_supported = 0;
+ /* WOL not supported on 82598 */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ /* check eeprom to see if WOL is enabled for X540 and newer */
+ if (hw->mac.type >= ixgbe_mac_X540) {
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0)))
+ return true;
+ }
+
+ /* WOL is determined based on device IDs for 82599 MACs */
switch (device_id) {
case IXGBE_DEV_ID_82599_SFP:
/* Only these subdevices support WOL */
switch (subdevice_id) {
- case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
case IXGBE_SUBDEV_ID_82599_560FLR:
+ case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
+ case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
+ case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
/* only support first port */
if (hw->bus.func != 0)
break;
@@ -8971,66 +9253,31 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
case IXGBE_SUBDEV_ID_82599_ECNA_DP:
- case IXGBE_SUBDEV_ID_82599_LOM_SFP:
- is_wol_supported = 1;
- break;
+ case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
+ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
+ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
+ return true;
}
break;
case IXGBE_DEV_ID_82599EN_SFP:
- /* Only this subdevice supports WOL */
+ /* Only these subdevices support WOL */
switch (subdevice_id) {
case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
- is_wol_supported = 1;
- break;
+ return true;
}
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
- is_wol_supported = 1;
+ return true;
break;
case IXGBE_DEV_ID_82599_KX4:
- is_wol_supported = 1;
- break;
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_X550T:
- case IXGBE_DEV_ID_X550EM_X_KX4:
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_X_10G_T:
- /* check eeprom to see if enabled wol */
- if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
- ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
- (hw->bus.func == 0))) {
- is_wol_supported = 1;
- }
+ return true;
+ default:
break;
}
- return is_wol_supported;
-}
-
-/**
- * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
- * @adapter: Pointer to adapter struct
- */
-static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_OF
- struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
- struct ixgbe_hw *hw = &adapter->hw;
- const unsigned char *addr;
-
- addr = of_get_mac_address(dp);
- if (addr) {
- ether_addr_copy(hw->mac.perm_addr, addr);
- return;
- }
-#endif /* CONFIG_OF */
-
-#ifdef CONFIG_SPARC
- ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
-#endif /* CONFIG_SPARC */
+ return false;
}
/**
@@ -9136,23 +9383,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
hw->mvals = ii->mvals;
/* EEPROM */
- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+ hw->eeprom.ops = *ii->eeprom_ops;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (ixgbe_removed(hw->hw_addr)) {
err = -EIO;
goto err_ioremap;
}
/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
- if (!(eec & (1 << 8)))
+ if (!(eec & BIT(8)))
hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
/* PHY */
- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+ hw->phy.ops = *ii->phy_ops;
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
/* ixgbe_identify_phy_generic will set prtad and mmds properly */
hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
@@ -9169,12 +9416,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
/* Make it possible for the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -9215,54 +9467,62 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto skip_sriov;
/* Mailbox */
ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ hw->mbx.ops = ii->mbx_ops;
pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
skip_sriov:
#endif
netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM;
- netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPIP | \
+ NETIF_F_GSO_SIT | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
+ netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ IXGBE_GSO_PARTIAL_FEATURES;
+
+ if (hw->mac.type >= ixgbe_mac_82599EB)
netdev->features |= NETIF_F_SCTP_CRC;
- netdev->hw_features |= NETIF_F_SCTP_CRC |
- NETIF_F_NTUPLE |
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXALL |
+ NETIF_F_HW_L2FW_DOFFLOAD;
+
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ netdev->hw_features |= NETIF_F_NTUPLE |
NETIF_F_HW_TC;
- break;
- default:
- break;
- }
- netdev->hw_features |= NETIF_F_RXALL;
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->hw_enc_features |= netdev->vlan_features;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
#ifdef CONFIG_IXGBE_DCB
- netdev->dcbnl_ops = &dcbnl_ops;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+ netdev->dcbnl_ops = &dcbnl_ops;
#endif
#ifdef IXGBE_FCOE
@@ -9287,10 +9547,6 @@ skip_sriov:
NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
netdev->hw_features |= NETIF_F_LRO;
@@ -9304,7 +9560,8 @@ skip_sriov:
goto err_sw_init;
}
- ixgbe_get_platform_mac_addr(adapter);
+ eth_platform_get_mac_address(&adapter->pdev->dev,
+ adapter->hw.mac.perm_addr);
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
@@ -9455,6 +9712,7 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->jump_tables[0]);
kfree(adapter->mac_table);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
@@ -9483,6 +9741,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev;
bool disable_dev;
+ int i;
/* if !adapter then we already cleaned up in probe */
if (!adapter)
@@ -9532,6 +9791,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
+ for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ if (adapter->jump_tables[i]) {
+ kfree(adapter->jump_tables[i]->input);
+ kfree(adapter->jump_tables[i]->mask);
+ }
+ kfree(adapter->jump_tables[i]);
+ }
+
kfree(adapter->mac_table);
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -9612,6 +9879,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_X550EM_x:
device_id = IXGBE_DEV_ID_X550EM_X_VF;
break;
+ case ixgbe_mac_x550em_a:
+ device_id = IXGBE_DEV_ID_X550EM_A_VF;
+ break;
default:
device_id = 0;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 9993a47..a0cb843 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -48,10 +48,10 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
if (size > mbx->size)
size = mbx->size;
- if (!mbx->ops.read)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.read(hw, msg, size, mbx_id);
+ return mbx->ops->read(hw, msg, size, mbx_id);
}
/**
@@ -70,10 +70,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
if (size > mbx->size)
return IXGBE_ERR_MBX;
- if (!mbx->ops.write)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.write(hw, msg, size, mbx_id);
+ return mbx->ops->write(hw, msg, size, mbx_id);
}
/**
@@ -87,10 +87,10 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_msg)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_msg(hw, mbx_id);
+ return mbx->ops->check_for_msg(hw, mbx_id);
}
/**
@@ -104,10 +104,10 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_ack)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_ack(hw, mbx_id);
+ return mbx->ops->check_for_ack(hw, mbx_id);
}
/**
@@ -121,10 +121,10 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_rst)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_rst(hw, mbx_id);
+ return mbx->ops->check_for_rst(hw, mbx_id);
}
/**
@@ -139,10 +139,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!countdown || !mbx->ops.check_for_msg)
+ if (!countdown || !mbx->ops)
return IXGBE_ERR_MBX;
- while (mbx->ops.check_for_msg(hw, mbx_id)) {
+ while (mbx->ops->check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
return IXGBE_ERR_MBX;
@@ -164,10 +164,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!countdown || !mbx->ops.check_for_ack)
+ if (!countdown || !mbx->ops)
return IXGBE_ERR_MBX;
- while (mbx->ops.check_for_ack(hw, mbx_id)) {
+ while (mbx->ops->check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
return IXGBE_ERR_MBX;
@@ -193,7 +193,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val;
- if (!mbx->ops.read)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
@@ -201,7 +201,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ret_val;
/* if ack received read message */
- return mbx->ops.read(hw, msg, size, mbx_id);
+ return mbx->ops->read(hw, msg, size, mbx_id);
}
/**
@@ -221,11 +221,11 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
s32 ret_val;
/* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
+ if (!mbx->ops || !mbx->timeout)
return IXGBE_ERR_MBX;
/* send msg */
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+ ret_val = mbx->ops->write(hw, msg, size, mbx_id);
if (ret_val)
return ret_val;
@@ -307,14 +307,15 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
break;
}
- if (vflre & (1 << vf_shift)) {
- IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ if (vflre & BIT(vf_shift)) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
hw->mbx.stats.rsts++;
return 0;
}
@@ -430,6 +431,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_x550em_a &&
hw->mac.type != ixgbe_mac_X540)
return;
@@ -446,7 +448,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
}
#endif /* CONFIG_PCI_IOV */
-struct ixgbe_mbx_operations mbx_ops_generic = {
+const struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
.write = ixgbe_write_mbx_pf,
.read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 8daa95f..01c2667 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -123,6 +123,6 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
-extern struct ixgbe_mbx_operations mbx_ops_generic;
+extern const struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
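Moving from an embedded ops struct to a const pointer changes the guard from per-callback NULL checks to a single table check, and lets the tables live in rodata. A sketch of the resulting wrapper shape (function name hypothetical; the pattern is taken from the ixgbe_mbx.c hunks above):

static s32 ixgbe_mbx_read_checked(struct ixgbe_hw *hw, u32 *msg,
				  u16 size, u16 mbx_id)
{
	const struct ixgbe_mbx_operations *ops = hw->mbx.ops;

	if (!ops)		/* no mailbox wired up for this MAC */
		return IXGBE_ERR_MBX;

	/* ops tables are complete, so no per-callback NULL test needed */
	return ops->read(hw, msg, size, mbx_id);
}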
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index 74c53ad..a8bed3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -38,6 +38,12 @@ struct ixgbe_mat_field {
unsigned int type;
};
+struct ixgbe_jump_table {
+ struct ixgbe_mat_field *mat;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input *mask;
+};
+
static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
@@ -82,6 +88,12 @@ static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
{ .val = NULL } /* terminal node */
};
+static struct ixgbe_mat_field ixgbe_udp_fields[] = {
+ {.off = 0, .val = ixgbe_mat_prgm_ports,
+ .type = IXGBE_ATR_FLOW_TYPE_UDPV4},
+ { .val = NULL } /* terminal node */
+};
+
struct ixgbe_nexthdr {
/* offset, shift, and mask of position to next header */
unsigned int o;
@@ -98,6 +110,8 @@ struct ixgbe_nexthdr {
static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = {
{ .o = 0, .s = 6, .m = 0xf,
.off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields},
+ { .o = 0, .s = 6, .m = 0xf,
+ .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields},
{ .jump = NULL } /* terminal node */
};
#endif /* _IXGBE_MODEL_H_ */
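With ixgbe_udp_fields added, ixgbe_ipv4_jumps carries two exits keyed on the IPv4 protocol value (0x06 = TCP, 0x11 = UDP, each shifted into the masked position of the u32 key). A hedged sketch of how a classifier walk might select the per-protocol field table (helper is hypothetical; the real walk lives in the tc u32 offload code in ixgbe_main.c):

static struct ixgbe_mat_field *ixgbe_pick_fields(u32 off, u32 val)
{
	struct ixgbe_nexthdr *jump;

	for (jump = ixgbe_ipv4_jumps; jump->jump; jump++)
		if (jump->off == off && (val & jump->mask) == jump->val)
			return jump->jump;	/* per-protocol field table */

	return NULL;	/* next header not offloadable */
}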
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 5abd66c..cc735ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -81,7 +81,11 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
+#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
+#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -103,7 +107,7 @@
#define IXGBE_PE 0xE0 /* Port expander addr */
#define IXGBE_PE_OUTPUT 1 /* Output reg offset */
#define IXGBE_PE_CONFIG 3 /* Config reg offset */
-#define IXGBE_PE_BIT1 (1 << 1)
+#define IXGBE_PE_BIT1 BIT(1)
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
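The sweep from literal shifts to BIT() throughout these headers is not only cosmetic: (1 << 31) shifts into the sign bit of a signed int, which is undefined behavior, whereas BIT(31) expands to an unsigned long shift. A standalone demo of the distinction (userspace C; BIT() is redefined here to match include/linux/bitops.h):

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))	/* same definition as the kernel's */

int main(void)
{
	unsigned int reg = 0;

	reg |= BIT(31);		/* well-defined: shift of an unsigned long */
	/* reg |= 1 << 31; -- undefined: shifts into int's sign bit */

	printf("reg = 0x%08x\n", reg);
	return 0;
}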
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef1504d..e5431bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -333,6 +333,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
*/
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -395,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
if (incval > 0x00FFFFFFULL)
e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
- (1 << IXGBE_INCPER_SHIFT_82599) |
+ BIT(IXGBE_INCPER_SHIFT_82599) |
((u32)incval & 0x00FFFFFFUL));
break;
default:
@@ -921,6 +922,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
switch (hw->mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1083,6 +1085,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
cc.shift = 2;
}
/* fallthrough */
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
@@ -1111,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
incval >>= IXGBE_INCVAL_SHIFT_82599;
cc.shift -= IXGBE_INCVAL_SHIFT_82599;
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
- (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+ BIT(IXGBE_INCPER_SHIFT_82599) | incval);
break;
default:
/* other devices aren't supported */
@@ -1223,6 +1226,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8025a3f..c5caacd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
+ mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
vmolr |= IXGBE_VMOLR_ROMPE;
@@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
+ mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
@@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* enable or disable receive depending on error */
vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
if (err)
- vfre &= ~(1 << vf_shift);
+ vfre &= ~BIT(vf_shift);
else
- vfre |= 1 << vf_shift;
+ vfre |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
if (err) {
@@ -589,47 +589,47 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 i;
+ u32 vlvfb_mask, pool_mask, i;
+
+ /* create mask for VF and other pools */
+ pool_mask = ~BIT(VMDQ_P(0) % 32);
+ vlvfb_mask = BIT(vf % 32);
/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
for (i = IXGBE_VLVF_ENTRIES; i--;) {
u32 bits[2], vlvfb, vid, vfta, vlvf;
u32 word = i * 2 + vf / 32;
- u32 mask = 1 << (vf % 32);
+ u32 mask;
vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* if our bit isn't set we can skip it */
- if (!(vlvfb & mask))
+ if (!(vlvfb & vlvfb_mask))
continue;
/* clear our bit from vlvfb */
- vlvfb ^= mask;
+ vlvfb ^= vlvfb_mask;
/* create 64b mask to check whether we should clear VLVF */
bits[word % 2] = vlvfb;
bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
- /* if promisc is enabled, PF will be present, leave VFTA */
- if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) {
- bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32));
-
- if (bits[0] || bits[1])
- goto update_vlvfb;
- goto update_vlvf;
- }
-
/* if other pools are present, just remove ourselves */
- if (bits[0] || bits[1])
+ if (bits[(VMDQ_P(0) / 32) ^ 1] ||
+ (bits[VMDQ_P(0) / 32] & pool_mask))
goto update_vlvfb;
+ /* if PF is present, leave VFTA */
+ if (bits[0] || bits[1])
+ goto update_vlvf;
+
/* if we cannot determine VLAN just remove ourselves */
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
if (!vlvf)
goto update_vlvfb;
vid = vlvf & VLAN_VID_MASK;
- mask = 1 << (vid % 32);
+ mask = BIT(vid % 32);
/* clear bit from VFTA */
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
@@ -638,6 +638,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
update_vlvf:
/* clear POOL selection enable */
IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ vlvfb = 0;
update_vlvfb:
/* clear pool bits */
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
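Each VLVF entry's pool membership spans two 32-bit VLVFB words, so "are other pools still using this VLAN?" must test 64 bits while ignoring the PF's own pool bit. A compact restatement of the test introduced above (helper name hypothetical):

/* bits[0]/bits[1] hold one VLVF entry's 64-bit pool bitmap;
 * pf_pool is VMDQ_P(0).  True when any pool other than the PF
 * still references the VLAN.
 */
static bool ixgbe_other_pools_set(const u32 bits[2], u32 pf_pool)
{
	return bits[(pf_pool / 32) ^ 1] ||
	       (bits[pf_pool / 32] & ~BIT(pf_pool % 32));
}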
@@ -810,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= 1 << vf_shift;
+ reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
/* force drop enable for all VF Rx queues */
@@ -818,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= 1 << vf_shift;
+ reg |= BIT(vf_shift);
/*
* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
* For more info take a look at ixgbe_set_vf_lpe
@@ -834,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
#endif /* CONFIG_FCOE */
if (pf_max_frame > ETH_FRAME_LEN)
- reg &= ~(1 << vf_shift);
+ reg &= ~BIT(vf_shift);
}
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
@@ -843,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
- reg |= (1 << vf_shift);
+ reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
/*
@@ -887,7 +890,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
return -1;
}
- if (adapter->vfinfo[vf].pf_set_mac &&
+ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
!ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
@@ -905,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
u8 tcs = netdev_get_num_tc(adapter->netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- int err;
if (adapter->vfinfo[vf].pf_vlan || tcs) {
e_warn(drv,
@@ -920,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
if (!vid && !add)
return 0;
- err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
- if (err)
- return err;
-
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-
- if (add)
- adapter->vfinfo[vf].vlan_count++;
- else if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
-
- return 0;
+ return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}
static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -961,8 +950,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
* If the VF is allowed to set MAC filters then turn off
* anti-spoofing to avoid false positives.
*/
- if (adapter->vfinfo[vf].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+ if (adapter->vfinfo[vf].spoofchk_enabled) {
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+ }
}
err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
@@ -1318,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
ixgbe_set_vmvir(adapter, vlan, qos, vf);
ixgbe_set_vmolr(hw, vf, false);
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- adapter->vfinfo[vf].vlan_count++;
/* enable hide vlan on X550 */
if (hw->mac.type >= ixgbe_mac_X550)
@@ -1353,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
ixgbe_set_vf_vlan(adapter, true, 0, vf);
ixgbe_clear_vmvir(adapter, vf);
ixgbe_set_vmolr(hw, vf, true);
- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
- if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
/* disable hide VLAN on X550 */
if (hw->mac.type >= ixgbe_mac_X550)
@@ -1395,7 +1381,7 @@ out:
return err;
}
-static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
@@ -1522,27 +1508,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int vf_target_reg = vf >> 3;
- int vf_target_shift = vf % 8;
struct ixgbe_hw *hw = &adapter->hw;
- u32 regval;
if (vf >= adapter->num_vfs)
return -EINVAL;
adapter->vfinfo[vf].spoofchk_enabled = setting;
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- regval &= ~(1 << vf_target_shift);
- regval |= (setting << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
-
- if (adapter->vfinfo[vf].vlan_count) {
- vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- regval &= ~(1 << vf_target_shift);
- regval |= (setting << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+ /* configure MAC spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
+
+ /* configure VLAN spoofing */
+ hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
+
+	/* Ensure LLDP and FC are set for Ethertype Antispoofing before
+	 * calling set_ethertype_anti_spoofing for this VF below
+ */
+ if (hw->mac.ops.set_ethertype_anti_spoofing) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETH_P_LLDP));
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ ETH_P_PAUSE));
+
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
}
return 0;
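With the vlan_count bookkeeping removed, a single ndo_set_vf_spoofchk call now drives MAC, VLAN and (where the MAC provides the hook) Ethertype anti-spoofing in lockstep; from userspace this corresponds to "ip link set DEV vf N spoofchk on". For reference, the ETQF word programmed for the LLDP slot above decomposes as follows (constants from ixgbe_type.h; IXGBE_ETH_P_LLDP is 0x88CC):

	u32 etqf_lldp = IXGBE_ETQF_FILTER_EN |    /* bit 31: slot active */
			IXGBE_ETQF_TX_ANTISPOOF | /* bit 29: drop spoofed Tx */
			IXGBE_ETH_P_LLDP;         /* EtherType match 0x88CC */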
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index dad9257..47e65e2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -44,6 +44,7 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index bf7367a..da3d835 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -59,8 +59,12 @@
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -75,21 +79,25 @@
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550T1 0x15D1
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
-#define IXGBE_DEV_ID_X550_VF_HV 0x1564
-#define IXGBE_DEV_ID_X550_VF 0x1565
-#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
-#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
+#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
+#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
/* VF Device IDs */
-#define IXGBE_DEV_ID_82599_VF 0x10ED
-#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -128,7 +136,7 @@
#define IXGBE_FLA_X540 IXGBE_FLA_8259X
#define IXGBE_FLA_X550 IXGBE_FLA_8259X
#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X
-#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA_X550EM_a 0x15F68
#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -143,13 +151,6 @@
#define IXGBE_GRC_X550EM_a 0x15F64
#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC)
-#define IXGBE_SRAMREL_8259X 0x10210
-#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_a 0x15F6C
-#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
-
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
@@ -375,6 +376,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
#define IXGBE_RXFECCERR0 0x051B8
#define IXGBE_LLITHRESH 0x0EC90
@@ -446,6 +449,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
@@ -543,6 +548,7 @@ struct ixgbe_thermal_sensor_data {
/* DCB registers */
#define MAX_TRAFFIC_CLASS 8
#define X540_TRAFFIC_CLASS 4
+#define DEF_TRAFFIC_CLASS 1
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
@@ -554,7 +560,6 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-
/* Security Control Registers */
#define IXGBE_SECTXCTRL 0x08800
#define IXGBE_SECTXSTAT 0x08804
@@ -693,16 +698,16 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */
#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
#define IXGBE_FCBUFF_OFFSET_SHIFT 16
-#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
-#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */
+#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */
#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
@@ -719,23 +724,23 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */
#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
-#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */
/* FCoE Receive Control */
#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */
#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
/* FCoE Redirection */
@@ -1056,15 +1061,9 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
#define IXGBE_TDPROBE 0x07F20
#define IXGBE_TXBUFCTRL 0x0C600
-#define IXGBE_TXBUFDATA0 0x0C610
-#define IXGBE_TXBUFDATA1 0x0C614
-#define IXGBE_TXBUFDATA2 0x0C618
-#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_RXBUFCTRL 0x03600
-#define IXGBE_RXBUFDATA0 0x03610
-#define IXGBE_RXBUFDATA1 0x03614
-#define IXGBE_RXBUFDATA2 0x03618
-#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
#define IXGBE_RFVAL 0x050A4
#define IXGBE_MDFTC1 0x042B8
@@ -1127,6 +1126,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_XPCSS 0x04290
#define IXGBE_MFLCN 0x04294
#define IXGBE_SERDESC 0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
#define IXGBE_MACS 0x0429C
#define IXGBE_AUTOC 0x042A0
#define IXGBE_LINKS 0x042A4
@@ -1203,6 +1203,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_MBINTEN 0x10000000
+#define IXGBE_RDRXCTL_MDP_EN 0x20000000
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@@ -1249,20 +1251,20 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
@@ -1309,6 +1311,7 @@ struct ixgbe_thermal_sensor_data {
/* MDIO definitions */
+#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
@@ -1740,7 +1743,7 @@ enum {
#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */
#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
@@ -1866,20 +1869,20 @@ enum {
#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
@@ -1957,7 +1960,9 @@ enum {
#define IXGBE_GSSR_PHY1_SM 0x0004
#define IXGBE_GSSR_MAC_CSR_SM 0x0008
#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
#define IXGBE_GSSR_NVM_PHY_MASK 0xF
@@ -1997,6 +2002,9 @@ enum {
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
@@ -2111,6 +2119,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7)
#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
#define IXGBE_FW_LESM_STATE_1 0x1
#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
@@ -2530,6 +2539,10 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
@@ -2620,6 +2633,20 @@ enum ixgbe_fdir_pballoc_type {
#define FW_MAX_READ_BUFFER_SIZE 1024
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_PHY_TOKEN_REQ_CMD 0x0A
+#define FW_PHY_TOKEN_REQ_LEN 2
+#define FW_PHY_TOKEN_REQ 0
+#define FW_PHY_TOKEN_REL 1
+#define FW_PHY_TOKEN_OK 1
+#define FW_PHY_TOKEN_RETRY 0x80
+#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
+#define FW_PHY_TOKEN_WAIT 5 /* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
+#define FW_INT_PHY_REQ_CMD 0xB
+#define FW_INT_PHY_REQ_LEN 10
+#define FW_INT_PHY_REQ_READ 0
+#define FW_INT_PHY_REQ_WRITE 1
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2688,6 +2715,28 @@ struct ixgbe_hic_disable_rxen {
u16 pad3;
};
+struct ixgbe_hic_phy_token_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ u16 pad;
+};
+
+struct ixgbe_hic_internal_phy_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ __be16 address;
+ u16 rsv1;
+ __be32 write_data;
+ u16 pad;
+} __packed;
+
+struct ixgbe_hic_internal_phy_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 read_data;
+};
+
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2786,15 +2835,15 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
@@ -2948,7 +2997,6 @@ union ixgbe_atr_hash_dword {
IXGBE_CAT(EEC, m), \
IXGBE_CAT(FLA, m), \
IXGBE_CAT(GRC, m), \
- IXGBE_CAT(SRAMREL, m), \
IXGBE_CAT(FACTPS, m), \
IXGBE_CAT(SWSM, m), \
IXGBE_CAT(SWFW_SYNC, m), \
@@ -2989,6 +3037,7 @@ enum ixgbe_mac_type {
ixgbe_mac_X540,
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
+ ixgbe_mac_x550em_a,
ixgbe_num_macs
};
@@ -3017,6 +3066,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_intel,
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
+ ixgbe_phy_sgmii,
ixgbe_phy_generic
};
@@ -3130,8 +3180,9 @@ struct ixgbe_bus_info {
enum ixgbe_bus_width width;
enum ixgbe_bus_type type;
- u16 func;
- u16 lan_id;
+ u8 func;
+ u8 lan_id;
+ u8 instance_id;
};
/* Flow control parameters */
@@ -3266,6 +3317,7 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*init_swfw_sync)(struct ixgbe_hw *);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@@ -3308,6 +3360,7 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
+ s32 (*setup_fc)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3323,6 +3376,8 @@ struct ixgbe_mac_operations {
s32 (*dmac_config)(struct ixgbe_hw *hw);
s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
@@ -3442,7 +3497,7 @@ struct ixgbe_mbx_stats {
};
struct ixgbe_mbx_info {
- struct ixgbe_mbx_operations ops;
+ const struct ixgbe_mbx_operations *ops;
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 usec_delay;
@@ -3475,10 +3530,10 @@ struct ixgbe_hw {
struct ixgbe_info {
enum ixgbe_mac_type mac;
s32 (*get_invariants)(struct ixgbe_hw *);
- struct ixgbe_mac_operations *mac_ops;
- struct ixgbe_eeprom_operations *eeprom_ops;
- struct ixgbe_phy_operations *phy_ops;
- struct ixgbe_mbx_operations *mbx_ops;
+ const struct ixgbe_mac_operations *mac_ops;
+ const struct ixgbe_eeprom_operations *eeprom_ops;
+ const struct ixgbe_phy_operations *phy_ops;
+ const struct ixgbe_mbx_operations *mbx_ops;
const u32 *mvals;
};
@@ -3517,14 +3572,19 @@ struct ixgbe_info {
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
+#define IXGBE_ERR_FW_RESP_INVALID -39
+#define IXGBE_ERR_TOKEN_RETRY -40
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV_MASK (3 << 6)
+#define IXGBE_FUSES0_REV_MASK (3u << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
@@ -3532,43 +3592,54 @@ struct ixgbe_info {
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31)
+
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29)
+
+#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19)
-#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2)
-#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
#define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3584,12 +3655,17 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
-#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
+#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
#endif /* _IXGBE_TYPE_H_ */
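The new IXGBE_NW_MNG_IF_SEL bits let the driver discover which MDIO address firmware assigned to the external PHY instead of probing for it. A hedged sketch of the extraction, illustrating how the x550em_a init code is expected to consume the field:

	u32 if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);

	if (if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT)
		hw->phy.mdio.prtad =
			(if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
			IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;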
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 2358c1b..f2b1d48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -747,6 +747,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_swfw_sync_X540 - Reset SWFW semaphore state
+ * @hw: pointer to hardware structure
+ *
+ * This function resets hardware semaphore bits for a semaphore that may
+ * have been left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+	/* Grab the semaphore and then release it without checking whether
+	 * the grab succeeded: the action is the same either way.  If we got
+	 * the lock, the release drops it; if we timed out waiting, the
+	 * release forcibly clears the stale lock.
+ */
+ ixgbe_get_swfw_sync_semaphore(hw);
+ ixgbe_release_swfw_sync_semaphore(hw);
+}
+
+/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
@@ -810,7 +829,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
return 0;
}
-static struct ixgbe_mac_operations mac_ops_X540 = {
+static const struct ixgbe_mac_operations mac_ops_X540 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_X540,
.start_hw = &ixgbe_start_hw_X540,
@@ -846,6 +865,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = NULL,
@@ -853,6 +873,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
.disable_rx_buff = &ixgbe_disable_rx_buff_generic,
.enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_thermal_sensor_data = NULL,
@@ -863,7 +884,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.init_params = &ixgbe_init_eeprom_params_X540,
.read = &ixgbe_read_eerd_X540,
.read_buffer = &ixgbe_read_eerd_buffer_X540,
@@ -874,7 +895,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
};
-static struct ixgbe_phy_operations phy_ops_X540 = {
+static const struct ixgbe_phy_operations phy_ops_X540 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
.init = NULL,
@@ -897,7 +918,7 @@ static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X540)
};
-struct ixgbe_info ixgbe_X540_info = {
+const struct ixgbe_info ixgbe_X540_info = {
.mac = ixgbe_mac_X540,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X540,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index a1468b1..e21cd48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -36,4 +36,5 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 68a9c64..19b75cd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
+ * Copyright(c) 1999 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
#include "ixgbe_phy.h"
static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
@@ -272,16 +273,26 @@ out:
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
+ /* Fallthrough */
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
@@ -324,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -412,6 +423,121 @@ out:
return ret;
}
+/**
+ * ixgbe_get_phy_token - Get the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REQ;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+ return IXGBE_ERR_FW_RESP_INVALID;
+
+ return IXGBE_ERR_TOKEN_RETRY;
+}
+
+/**
+ * ixgbe_put_phy_token - Put the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REL;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ return IXGBE_ERR_FW_RESP_INVALID;
+}
+
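On x550em_a parts the PHY is shared between ports and with firmware, so MDIO access is bracketed by this token handshake; FW_PHY_TOKEN_RETRY tells the caller to back off and try again, bounded by FW_PHY_TOKEN_RETRIES. A sketch of the expected retry loop (wrapper name hypothetical; the driver's own acquire path may fold this into acquire_swfw_sync):

static s32 ixgbe_get_phy_token_wait(struct ixgbe_hw *hw)
{
	u32 tries = FW_PHY_TOKEN_RETRIES;	/* 5 s in 5 ms slices */
	s32 status;

	do {
		status = ixgbe_get_phy_token(hw);
		if (status != IXGBE_ERR_TOKEN_RETRY)
			return status;		/* 0, or a hard error */
		msleep(FW_PHY_TOKEN_DELAY);
	} while (--tries);

	return IXGBE_ERR_TOKEN_RETRY;
}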
+/**
+ * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ __always_unused u32 device_type,
+ u32 data)
+{
+ struct ixgbe_hic_internal_phy_req write_cmd;
+
+ memset(&write_cmd, 0, sizeof(write_cmd));
+ write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ write_cmd.port_number = hw->bus.lan_id;
+ write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
+ write_cmd.address = cpu_to_be16(reg_addr);
+ write_cmd.write_data = cpu_to_be32(data);
+
+ return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: Pointer to storage for the data read from the register
+ **/
+static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ __always_unused u32 device_type,
+ u32 *data)
+{
+ union {
+ struct ixgbe_hic_internal_phy_req cmd;
+ struct ixgbe_hic_internal_phy_resp rsp;
+ } hic;
+ s32 status;
+
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+ hic.cmd.address = cpu_to_be16(reg_addr);
+
+ status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
+
+ /* Extract the register value from the response. */
+ *data = be32_to_cpu(hic.rsp.read_data);
+
+ return status;
+}
+
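These two wrappers route IOSF sideband access through the host interface so firmware can arbitrate it, and they are what the new read_iosf_sb_reg/write_iosf_sb_reg mac_ops hooks point at on x550em_a. A representative read-modify-write through the hooks (illustrative; error handling trimmed):

	u32 reg_val;
	s32 rc;

	rc = hw->mac.ops.read_iosf_sb_reg(hw,
					  IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
					  IXGBE_SB_IOSF_TARGET_KR_PHY,
					  &reg_val);
	if (!rc) {
		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
		rc = hw->mac.ops.write_iosf_sb_reg(hw,
					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
					IXGBE_SB_IOSF_TARGET_KR_PHY,
					reg_val);
	}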
/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
* command assuming that the semaphore is already obtained.
* @hw: pointer to hardware structure
@@ -436,8 +562,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
/* one word */
buffer.length = cpu_to_be16(sizeof(u16));
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
if (status)
return status;
@@ -487,7 +612,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ status = ixgbe_host_interface_command(hw, &buffer,
sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT,
false);
@@ -770,8 +895,7 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
buffer.data = data;
buffer.address = cpu_to_be32(offset * 2);
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -813,8 +937,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
buffer.req.checksum = FW_DEFAULT_CHECKSUM;
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -861,9 +984,9 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- fw_cmd.port_number = (u8)hw->bus.lan_id;
+ fw_cmd.port_number = hw->bus.lan_id;
- status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ status = ixgbe_host_interface_command(hw, &fw_cmd,
sizeof(struct ixgbe_hic_disable_rxen),
IXGBE_HI_COMMAND_TIMEOUT, true);
@@ -1248,6 +1371,117 @@ i2c_err:
}
/**
+ * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
+ * @hw: pointer to hardware structure
+ * @speed: link speed to configure
+ * @autoneg_wait_to_complete: unused
+ *
+ * Configure the integrated PHY for native SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ bool setup_linear = false;
+ u32 reg_phy_int;
+ s32 rc;
+
+ /* Check if SFP module is supported and linear */
+ rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: the SFP-not-present
+  * error is not treated as fatal in the setup MAC link flow.
+  */
+ if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (rc)
+ return rc;
+
+ /* Configure internal PHY for native SFI */
+ rc = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ &reg_phy_int);
+ if (rc)
+ return rc;
+
+ if (setup_linear) {
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
+ } else {
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
+ }
+
+ rc = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ reg_phy_int);
+ if (rc)
+ return rc;
+
+ /* Setup XFI/SFI internal link */
+ return ixgbe_setup_ixfi_x550em(hw, &speed);
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ * @speed: link speed to configure
+ * @autoneg_wait_to_complete: unused
+ *
+ * Configure the integrated PHY for SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ u32 reg_slice, slice_offset;
+ bool setup_linear = false;
+ u16 reg_phy_ext;
+ s32 rc;
+
+ /* Check if SFP module is supported and linear */
+ rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: the SFP-not-present
+  * error is not treated as fatal in the setup MAC link flow.
+  */
+ if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (rc)
+ return rc;
+
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+
+ /* Get external PHY device id */
+ rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ if (rc)
+ return rc;
+
+ /* When configuring quad port CS4223, the MAC instance is part
+ * of the slice offset.
+ */
+ if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ slice_offset = (hw->bus.lan_id +
+ (hw->bus.instance_id << 1)) << 12;
+ else
+ slice_offset = hw->bus.lan_id << 12;
+
+ /* Configure CS4227/CS4223 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+ if (setup_linear)
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
+ else
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
+ return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE,
+ reg_phy_ext);
+}
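
To make the slice arithmetic concrete: on a quad-port CS4223 with lan_id = 1 and instance_id = 1, slice_offset = (1 + (1 << 1)) << 12 = 0x3000, so the EDC mode word lands at IXGBE_CS4227_LINE_SPARE24_LSB + 0x3000; a dual-port CS4227 on the same lan_id would use 0x1000 instead.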
+
+/**
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -1326,6 +1560,57 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
return 0;
}
+/**
+ * ixgbe_setup_sgmii - Set up link for SGMII
+ * @hw: pointer to hardware structure
+ * @speed: unused
+ * @autoneg_wait_to_complete: unused
+ */
+static s32
+ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+
+ return rc;
+}
+
/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -1342,15 +1627,35 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
- mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+ mac->ops.setup_fc = ixgbe_setup_fc_x550em;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550a;
+ break;
+ default:
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550em;
+ break;
+ }
mac->ops.set_rate_select_speed =
ixgbe_set_soft_rate_select_speed;
break;
case ixgbe_media_type_copper:
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
mac->ops.check_link = ixgbe_check_link_t_X550em;
+ return;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+ mac->ops.setup_link = ixgbe_setup_sgmii;
break;
default:
+ mac->ops.setup_fc = ixgbe_setup_fc_x550em;
break;
}
}
@@ -1614,7 +1919,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
@@ -1636,7 +1941,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
/* Restart auto-negotiation. */
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
@@ -1653,9 +1958,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, &reg_val);
if (status)
return status;
@@ -1674,20 +1979,24 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
/* Restart auto-negotiation. */
reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, reg_val);
return status;
}
-/** ixgbe_setup_kr_x550em - Configure the KR PHY.
- * @hw: pointer to hardware structure
+/**
+ * ixgbe_setup_kr_x550em - Configure the KR PHY
+ * @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY.
+ * Configures the integrated KR PHY for X550EM_x.
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
+ if (hw->mac.type != ixgbe_mac_X550EM_x)
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -1842,6 +2151,86 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
return status;
}
+/**
+ * ixgbe_setup_fc_x550em - Set up flow control
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+{
+ bool pause, asm_dir;
+ u32 reg_val;
+ s32 rc;
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ /* 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Determine PAUSE and ASM_DIR bits. */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ pause = false;
+ asm_dir = false;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause = false;
+ asm_dir = true;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ /* Fallthrough */
+ case ixgbe_fc_full:
+ pause = true;
+ asm_dir = true;
+ break;
+ default:
+ hw_err(hw, "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_KR &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L)
+ return 0;
+
+ rc = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ &reg_val);
+ if (rc)
+ return rc;
+
+ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ if (pause)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ if (asm_dir)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ rc = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ reg_val);
+
+ /* This device does not fully support AN. */
+ hw->fc.disable_fc_autoneg = true;
+
+ return rc;
+}
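
The switch above boils down to the following advertisement bits (Rx-pause-only cannot be advertised on its own, so it degrades to the symmetric/asymmetric combination and the Tx side is suppressed afterwards):

	requested_mode      SYM_PAUSE  ASM_PAUSE
	ixgbe_fc_none           0          0
	ixgbe_fc_tx_pause       0          1
	ixgbe_fc_rx_pause       1          1
	ixgbe_fc_full           1          1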
+
/** ixgbe_enter_lplu_x550em - Transition to low power states
* @hw: pointer to hardware structure
*
@@ -1939,6 +2328,36 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read NW_MNG_IF_SEL register and save field values.
+ */
+static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+ /* Save which NW management interface is connected on the board; this
+ * is used to determine the internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If X552 (X550EM_a) and the MDIO is connected to an external PHY,
+ * then set the PHY address. This register field has only been used
+ * for X552.
+ */
+ if (!hw->phy.nw_mng_if_sel) {
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ struct ixgbe_adapter *adapter = hw->back;
+
+ e_warn(drv, "nw_mng_if_sel not set\n");
+ }
+ return;
+ }
+
+ hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1953,14 +2372,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode.
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
}
/* Identify the PHY or SFP module */
@@ -2023,16 +2439,24 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/* Detect if there is a copper PHY attached. */
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ hw->phy.type = ixgbe_phy_sgmii;
+ /* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
- media_type = ixgbe_media_type_copper;
+ media_type = ixgbe_media_type_copper;
break;
default:
media_type = ixgbe_media_type_unknown;
@@ -2080,6 +2504,27 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_set_mdio_speed - Set MDIO clock speed
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+{
+ u32 hlreg0;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ /* Config MDIO clock speed before the first MDIO PHY access */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
+ default:
+ break;
+ }
+}
+
/** ixgbe_reset_hw_X550em - Perform hardware reset
** @hw: pointer to hardware structure
**
@@ -2093,7 +2538,6 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
s32 status;
u32 ctrl = 0;
u32 i;
- u32 hlreg0;
bool link_up = false;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -2179,11 +2623,7 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
- hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- }
+ ixgbe_set_mdio_speed(hw);
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
@@ -2206,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof |= (1 << vf_target_shift);
+ pfvfspoof |= BIT(vf_target_shift);
else
- pfvfspoof &= ~(1 << vf_target_shift);
+ pfvfspoof &= ~BIT(vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
@@ -2296,6 +2736,110 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
ixgbe_release_swfw_sync_X540(hw, mask);
}
+/**
+ * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed.
+ */
+static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status;
+
+ while (--retries) {
+ status = 0;
+ if (hmask)
+ status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+ if (status)
+ return status;
+ if (!(mask & IXGBE_GSSR_TOKEN_SM))
+ return 0;
+
+ status = ixgbe_get_phy_token(hw);
+ if (!status)
+ return 0;
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+ if (status != IXGBE_ERR_TOKEN_RETRY)
+ return status;
+ msleep(FW_PHY_TOKEN_DELAY);
+ }
+
+ return status;
+}
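
Note the ordering in the loop above: the hardware semaphore (hmask) is acquired before the token and dropped again whenever the token request comes back with IXGBE_ERR_TOKEN_RETRY, so other semaphore users are not blocked for the duration of the msleep() backoff between attempts.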
+
+/**
+ * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts back the shared PHY token as needed.
+ */
+static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+
+ if (mask & IXGBE_GSSR_TOKEN_SM)
+ ixgbe_put_phy_token(hw);
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+}
+
+/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to storage for the value read from the PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to the specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -2337,12 +2881,10 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
.init_thermal_sensor_thresh = NULL, \
- .prot_autoc_read = &prot_autoc_read_generic, \
- .prot_autoc_write = &prot_autoc_write_generic, \
.enable_rx = &ixgbe_enable_rx_generic, \
.disable_rx = &ixgbe_disable_rx_x550, \
-static struct ixgbe_mac_operations mac_ops_X550 = {
+static const struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC
.reset_hw = &ixgbe_reset_hw_X540,
.get_media_type = &ixgbe_get_media_type_X540,
@@ -2354,20 +2896,45 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
.setup_sfp = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
};
-static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
X550_COMMON_MAC
.reset_hw = &ixgbe_reset_hw_X550em,
.get_media_type = &ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
.get_wwn_prefix = NULL,
- .setup_link = NULL, /* defined later */
+ .setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
.get_bus_info = &ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
.release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .setup_fc = NULL, /* defined later */
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
+};
+
+static struct ixgbe_mac_operations mac_ops_x550em_a = {
+ X550_COMMON_MAC
+ .reset_hw = ixgbe_reset_hw_X550em,
+ .get_media_type = ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
+ .setup_fc = ixgbe_setup_fc_x550em,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
#define X550_COMMON_EEP \
@@ -2379,12 +2946,12 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
-static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
.init_params = &ixgbe_init_eeprom_params_X550,
};
-static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
X550_COMMON_EEP
.init_params = &ixgbe_init_eeprom_params_X540,
};
@@ -2398,23 +2965,25 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
- .read_reg = &ixgbe_read_phy_reg_generic, \
- .write_reg = &ixgbe_write_phy_reg_generic, \
.setup_link = &ixgbe_setup_phy_link_generic, \
.set_phy_power = NULL, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
-static struct ixgbe_phy_operations phy_ops_X550 = {
+static const struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
};
-static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
.read_i2c_combined = &ixgbe_read_i2c_combined_generic,
.write_i2c_combined = &ixgbe_write_i2c_combined_generic,
.read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -2422,6 +2991,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
&ixgbe_write_i2c_combined_generic_unlocked,
};
+static const struct ixgbe_phy_operations phy_ops_x550em_a = {
+ X550_COMMON_PHY
+ .init = &ixgbe_init_phy_ops_X550em,
+ .identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_x550a,
+ .write_reg = &ixgbe_write_phy_reg_x550a,
+};
+
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550)
};
@@ -2430,7 +3007,11 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-struct ixgbe_info ixgbe_X550_info = {
+static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550EM_a)
+};
+
+const struct ixgbe_info ixgbe_X550_info = {
.mac = ixgbe_mac_X550,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X550,
@@ -2440,7 +3021,7 @@ struct ixgbe_info ixgbe_X550_info = {
.mvals = ixgbe_mvals_X550,
};
-struct ixgbe_info ixgbe_X550EM_x_info = {
+const struct ixgbe_info ixgbe_X550EM_x_info = {
.mac = ixgbe_mac_X550EM_x,
.get_invariants = &ixgbe_get_invariants_X550_x,
.mac_ops = &mac_ops_X550EM_x,
@@ -2449,3 +3030,13 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
.mbx_ops = &mbx_ops_generic,
.mvals = ixgbe_mvals_X550EM_x,
};
+
+const struct ixgbe_info ixgbe_x550em_a_info = {
+ .mac = ixgbe_mac_x550em_a,
+ .get_invariants = &ixgbe_get_invariants_X550_x,
+ .mac_ops = &mac_ops_x550em_a,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_a,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
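
For context, a sketch of how one of these info tables is consumed; the binding actually happens in ixgbe_probe() via the PCI table's driver_data index, and the helper below is only an approximation of that pattern:

/* Hypothetical consumer, approximating how probe copies an ixgbe_info
 * entry (e.g. ixgbe_x550em_a_info) into the live hw struct.
 */
static void example_bind_info(struct ixgbe_hw *hw, const struct ixgbe_info *ii)
{
	hw->mac.ops = *ii->mac_ops;		/* mac_ops_x550em_a */
	hw->eeprom.ops = *ii->eeprom_ops;	/* eeprom_ops_X550EM_x */
	hw->phy.ops = *ii->phy_ops;		/* phy_ops_x550em_a */
	hw->mbx.ops = *ii->mbx_ops;		/* mbx_ops_generic */
	hw->mvals = ii->mvals;			/* ixgbe_mvals_x550em_a */
}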
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5843458..ae09d60 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,6 +33,11 @@
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -74,7 +79,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
/* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -296,16 +301,16 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
#endif /* _IXGBEVF_DEFINES_H_ */
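
The BIT() conversions in this header (and in the ixgbe changes above) are mechanical: BIT(nr) is defined in the kernel headers as (1UL << (nr)), which documents intent and avoids the undefined behaviour of shifting a signed 1 into the sign bit once nr reaches 31.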
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index d7aa4b2..508e72c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -42,65 +42,54 @@
#define IXGBE_ALL_RAR_ENTRIES 16
+enum {NETDEV_STATS, IXGBEVF_STATS};
+
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
- struct {
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
- };
+ int type;
+ int sizeof_stat;
+ int stat_offset;
};
-#define IXGBEVF_STAT(m, b, r) { \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
- .stat_offset = offsetof(struct ixgbevf_adapter, m), \
- .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
- .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .type = IXGBEVF_STATS, \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}
-#define IXGBEVF_ZSTAT(m) { \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
- .stat_offset = offsetof(struct ixgbevf_adapter, m), \
- .base_stat_offset = -1, \
- .saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+ .stat_string = #_net_stat, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
- {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
- stats.saved_reset_vfgprc)},
- {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
- stats.saved_reset_vfgptc)},
- {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
- stats.saved_reset_vfgorc)},
- {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
- stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
- {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
- {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
- {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
- stats.saved_reset_vfmprc)},
- {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
- {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
- {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
- {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
- {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
- {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
- {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+ IXGBEVF_NETDEV_STAT(rx_packets),
+ IXGBEVF_NETDEV_STAT(tx_packets),
+ IXGBEVF_NETDEV_STAT(rx_bytes),
+ IXGBEVF_NETDEV_STAT(tx_bytes),
+ IXGBEVF_STAT("tx_busy", tx_busy),
+ IXGBEVF_STAT("tx_restart_queue", restart_queue),
+ IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+ IXGBEVF_NETDEV_STAT(multicast),
+ IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
};
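
As an illustration of the new table entries, IXGBEVF_NETDEV_STAT(multicast) above expands to roughly:

{
	.stat_string = "multicast",
	.type = NETDEV_STATS,
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, multicast),
	.stat_offset = offsetof(struct net_device_stats, multicast)
}

so ixgbevf_get_ethtool_stats() can resolve each value generically from either the net_device stats or the adapter struct, depending on .type.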
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+ (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+ (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
memset(p, 0, regs_len);
- regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+ /* generate a number suitable for ethtool's register version */
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@ clear_reset:
return err;
}
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
switch (stringset) {
case ETH_SS_TEST:
- return IXGBE_TEST_LEN;
+ return IXGBEVF_TEST_LEN;
case ETH_SS_STATS:
- return IXGBE_GLOBAL_STATS_LEN;
+ return IXGBEVF_STATS_LEN;
default:
return -EINVAL;
}
@@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- char *base = (char *)adapter;
- int i;
-#ifdef BP_EXTENDED_STATS
- u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
- tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *net_stats;
+ unsigned int start;
+ struct ixgbevf_ring *ring;
+ int i, j;
+ char *p;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_yields += adapter->rx_ring[i]->stats.yields;
- rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
- rx_yields += adapter->rx_ring[i]->stats.yields;
- }
+ ixgbevf_update_stats(adapter);
+ net_stats = dev_get_stats(netdev, &temp);
+ for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+ switch (ixgbevf_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *)net_stats +
+ ixgbevf_gstrings_stats[i].stat_offset;
+ break;
+ case IXGBEVF_STATS:
+ p = (char *)adapter +
+ ixgbevf_gstrings_stats[i].stat_offset;
+ break;
+ default:
+ data[i] = 0;
+ continue;
+ }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_yields += adapter->tx_ring[i]->stats.yields;
- tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
- tx_yields += adapter->tx_ring[i]->stats.yields;
+ data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- adapter->bp_rx_yields = rx_yields;
- adapter->bp_rx_cleaned = rx_cleaned;
- adapter->bp_rx_missed = rx_missed;
+ /* populate Tx queue data */
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ ring = adapter->tx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
+ continue;
+ }
- adapter->bp_tx_yields = tx_yields;
- adapter->bp_tx_cleaned = tx_cleaned;
- adapter->bp_tx_missed = tx_missed;
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
#endif
+ }
- ixgbevf_update_stats(adapter);
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = base + ixgbe_gstrings_stats[i].stat_offset;
- char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-
- if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
- if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
- data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
- else
- data[i] = *(u64 *)p;
- } else {
- if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
- data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
- else
- data[i] = *(u32 *)p;
+ /* populate Rx queue data */
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
+ continue;
}
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
+#endif
}
}
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
char *p = (char *)data;
int i;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+ }
break;
}
}
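
With the strings generated above, `ethtool -S` on a VF now reports the global counters first, then per-queue blocks in Tx-then-Rx order, e.g. tx_queue_0_packets, tx_queue_0_bytes, ..., rx_queue_0_packets, rx_queue_0_bytes (with the three bp_* busy-poll counters appended per queue when BP_EXTENDED_STATS is built in), matching the fill order in ixgbevf_get_ethtool_stats().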
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 991eeae..d5944c3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -166,10 +166,10 @@ struct ixgbevf_ring {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM BIT(0)
+#define IXGBE_TX_FLAGS_VLAN BIT(1)
+#define IXGBE_TX_FLAGS_TSO BIT(2)
+#define IXGBE_TX_FLAGS_IPV4 BIT(3)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -403,13 +403,6 @@ struct ixgbevf_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- /* Some features need tri-state capability,
- * thus the additional *_CAPABLE flags.
- */
- u32 flags;
-#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
-#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
-
struct msix_entry *msix_entries;
/* OS defined structs */
@@ -429,16 +422,6 @@ struct ixgbevf_adapter {
unsigned int tx_ring_count;
unsigned int rx_ring_count;
-#ifdef BP_EXTENDED_STATS
- u64 bp_rx_yields;
- u64 bp_rx_cleaned;
- u64 bp_rx_missed;
-
- u64 bp_tx_yields;
- u64 bp_tx_cleaned;
- u64 bp_tx_missed;
-#endif
-
u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 link_speed;
bool link_up;
@@ -461,13 +444,19 @@ enum ixbgevf_state_t {
__IXGBEVF_REMOVING,
__IXGBEVF_SERVICE_SCHED,
__IXGBEVF_SERVICE_INITED,
+ __IXGBEVF_RESET_REQUESTED,
+ __IXGBEVF_QUEUE_RESET_REQUESTED,
};
enum ixgbevf_boards {
board_82599_vf,
+ board_82599_vf_hv,
board_X540_vf,
+ board_X540_vf_hv,
board_X550_vf,
+ board_X550_vf_hv,
board_X550EM_x_vf,
+ board_X550EM_x_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -482,6 +471,12 @@ extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
+extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b0edae9..5e348b1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -62,10 +62,14 @@ static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2015 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
- [board_82599_vf] = &ixgbevf_82599_vf_info,
- [board_X540_vf] = &ixgbevf_X540_vf_info,
- [board_X550_vf] = &ixgbevf_X550_vf_info,
- [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_82599_vf] = &ixgbevf_82599_vf_info,
+ [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
+ [board_X540_vf] = &ixgbevf_X540_vf_info,
+ [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
+ [board_X550_vf] = &ixgbevf_X550_vf_info,
+ [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
+ [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -78,9 +82,13 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
*/
static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
/* required last entry */
{0, }
};
@@ -268,7 +276,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
ixgbevf_service_event_schedule(adapter);
}
}
@@ -288,9 +296,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev)
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: board private structure
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
- struct ixgbevf_ring *tx_ring)
+ struct ixgbevf_ring *tx_ring, int napi_budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_tx_buffer *tx_buffer;
@@ -328,7 +337,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_kfree_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1013,8 +1022,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
int per_ring_budget, work_done = 0;
bool clean_complete = true;
- ixgbevf_for_each_ring(ring, q_vector->tx)
- clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+ ixgbevf_for_each_ring(ring, q_vector->tx) {
+ if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
if (budget <= 0)
return budget;
@@ -1035,7 +1046,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget);
work_done += cleaned;
- clean_complete &= (cleaned < per_ring_budget);
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1052,7 +1064,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
!test_bit(__IXGBEVF_REMOVING, &adapter->state))
ixgbevf_irq_enable_queues(adapter,
- 1 << q_vector->v_idx);
+ BIT(q_vector->v_idx));
return 0;
}
@@ -1154,14 +1166,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
}
/* add q_vector eims value to global eims_enable_mask */
- adapter->eims_enable_mask |= 1 << v_idx;
+ adapter->eims_enable_mask |= BIT(v_idx);
ixgbevf_write_eitr(q_vector);
}
ixgbevf_set_ivar(adapter, -1, 1, v_idx);
/* setup eims_other and add value to global eims_enable_mask */
- adapter->eims_other = 1 << v_idx;
+ adapter->eims_other = BIT(v_idx);
adapter->eims_enable_mask |= adapter->eims_other;
}
@@ -1585,8 +1597,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl |= (8 << 16); /* WTHRESH = 8 */
/* Setting PTHRESH to 32 both improves performance */
- txdctl |= (1 << 8) | /* HTHRESH = 1 */
- 32; /* PTHRESH = 32 */
+ txdctl |= (1u << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
@@ -1642,7 +1654,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
IXGBE_PSRTYPE_L2HDR;
if (adapter->num_rx_queues > 1)
- psrtype |= 1 << 29;
+ psrtype |= BIT(29);
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
@@ -1748,9 +1760,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
ring->count * sizeof(union ixgbe_adv_rx_desc));
+#ifndef CONFIG_SPARC
/* enable relaxed ordering */
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+#else
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+ IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_WRO_EN);
+#endif
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
@@ -1791,7 +1809,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
ixgbevf_setup_vfmrqc(adapter);
/* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
@@ -1904,7 +1922,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
spin_lock_bh(&adapter->mbx_lock);
- hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+ hw->mac.ops.update_xcast_mode(hw, xcast_mode);
/* reprogram multicast list */
hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -1984,7 +2002,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
hw->mbx.timeout = 0;
/* wait for watchdog to come around and bail us out */
- adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+ set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
}
return 0;
@@ -2052,7 +2070,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
spin_lock_bh(&adapter->mbx_lock);
while (api[idx] != ixgbe_mbox_api_unknown) {
- err = ixgbevf_negotiate_api_version(hw, api[idx]);
+ err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
if (!err)
break;
idx++;
@@ -2749,11 +2767,9 @@ static void ixgbevf_service_timer(unsigned long data)
static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
- if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
return;
- adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
-
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -2795,7 +2811,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
- eics |= 1 << i;
+ eics |= BIT(i);
}
/* Cause software interrupt to ensure rings are cleaned */
@@ -2821,7 +2837,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
/* if check for link returns error we will need to reset */
if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
- adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
link_up = false;
}
@@ -3222,11 +3238,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
- if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
+ &adapter->state))
return;
- adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
-
/* if interface is down do nothing */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -3271,9 +3286,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3286,49 +3310,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
- /* update GSO size and bytecount with header size */
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+ /* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 1 as index for TSO */
- mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+ mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
@@ -3337,76 +3365,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
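
A note on the checksum handling in the rewritten TSO path: for CHECKSUM_PARTIAL TCP the stack seeds l4.tcp->check with a pseudo-header checksum that includes the full L4 length, whereas the TSO engine wants the zero-length variant, since it inserts per-segment lengths itself. csum_replace_by_diff(&l4.tcp->check, htonl(paylen)) folds the payload length back out, equivalent to the ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0) computation the old code performed, but without re-reading the IP header.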
+static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 l4_hdr = 0;
- __be16 frag_off;
-
- switch (first->protocol) {
- case htons(ETH_P_IP):
- vlan_macip_lens |= skb_network_header_len(skb);
- type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
- break;
- case htons(ETH_P_IPV6):
- vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
- if (likely(skb_network_header_len(skb) ==
- sizeof(struct ipv6hdr)))
- break;
- ipv6_skip_exthdr(skb, skb_network_offset(skb) +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- l4_hdr = NEXTHDR_FRAGMENT;
- break;
- default:
- break;
- }
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto no_csum;
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = tcp_hdrlen(skb) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ ixgbevf_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum, l3 proto=%x, l4 proto=%x\n",
- first->protocol, l4_hdr);
- }
- skb_checksum_help(skb);
- goto no_csum;
}
-
- /* update TX checksum flag */
- first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto no_csum;
}
-
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
- type_tucmd, mss_l4len_idx);
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3442,7 +3449,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
/* use index 1 context for TSO/FSO/FCOE */
if (tx_flags & IXGBE_TX_FLAGS_TSO)
- olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+ olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
@@ -3747,7 +3754,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
/* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, max_frame);
+ hw->mac.ops.set_rlpml(hw, max_frame);
return 0;
}
@@ -3890,6 +3897,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
+#define IXGBEVF_MAX_MAC_HDR_LEN 127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
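
The 127 and 511 caps appear to mirror the advanced Tx context descriptor layout, where MACLEN is a 7-bit field and the network (L3) header length a 9-bit field; a frame whose headers cannot be described there has the hardware offloads masked off so the stack falls back to software checksumming and GSO.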
+
static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -3908,7 +3949,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = ixgbevf_features_check,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -4013,26 +4054,37 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPIP | \
+ NETIF_F_GSO_SIT | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
- netdev->vlan_features |= NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_SG;
+ netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+ IXGBEVF_GSO_PARTIAL_FEATURES;
+
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
+
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
if (IXGBE_REMOVED(hw->hw_addr)) {
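
As a sketch of how the feature wiring above generalizes (the foo_* names are hypothetical, not part of this driver): a device that can only replicate outer headers for tunnelled segmentation advertises those GSO types under NETIF_F_GSO_PARTIAL, and the stack finishes whatever the hardware cannot.

	#include <linux/netdevice.h>

	/* tunnel GSO types this hypothetical hardware handles only partially */
	#define FOO_GSO_PARTIAL_FEATURES (NETIF_F_GSO_UDP_TUNNEL | \
					  NETIF_F_GSO_UDP_TUNNEL_CSUM)

	static void foo_init_features(struct net_device *netdev)
	{
		netdev->gso_partial_features = FOO_GSO_PARTIAL_FEATURES;
		netdev->hw_features |= NETIF_F_GSO_PARTIAL |
				       FOO_GSO_PARTIAL_FEATURES;
		netdev->features = netdev->hw_features;
	}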
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index dc68fea..61a80da 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -346,3 +346,14 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.check_for_rst = ixgbevf_check_for_rst_vf,
};
+/* Mailbox operations when running on Hyper-V.
+ * On Hyper-V, PF/VF communication is not through the
+ * hardware mailbox; it goes through a software-mediated
+ * path instead, so most mailbox operations are no-ops
+ * while running on Hyper-V.
+ */
+const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 4d613a4..e670d3b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -27,6 +27,12 @@
#include "vf.h"
#include "ixgbevf.h"
+/* On Hyper-V, a reset is performed by reading from this offset
+ * in PCI config space; this read is the mechanism Hyper-V uses
+ * for PF/VF communication.
+ */
+#define IXGBE_HV_RESET_OFFSET 0x201
+
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
@@ -126,6 +132,27 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
}
/**
+ * ixgbevf_hv_reset_hw_vf - reset the hardware; Hyper-V variant where
+ * the VF/PF communication is through the PCI config space
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
+ struct ixgbevf_adapter *adapter = hw->back;
+ int i;
+
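+	/* read back the 6-byte permanent MAC address one byte at a time */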
+ for (i = 0; i < 6; i++)
+ pci_read_config_byte(adapter->pdev,
+ (i + IXGBE_HV_RESET_OFFSET),
+ &hw->mac.perm_addr[i]);
+ return 0;
+#else
+ pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
+ return -EOPNOTSUPP;
+#endif
+}
+
+/**
* ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
@@ -258,6 +285,11 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
return ret_val;
}
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ return -EOPNOTSUPP;
+}
+
/**
* ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
* @adapter: pointer to the port handle
@@ -416,6 +448,26 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
return ret_val;
}
+/**
+ * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ *
+ * We don't really allow setting the device MAC address. However,
+ * if the address being set is the permanent MAC address we will
+ * permit that.
+ **/
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ if (ether_addr_equal(addr, hw->mac.perm_addr))
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
u32 *msg, u16 size)
{
@@ -473,15 +525,22 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
}
/**
+ * ixgbevf_hv_update_mc_addr_list_vf - Hyper-V variant; just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_update_xcast_mode - Update Multicast mode
* @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
* @xcast_mode: new multicast mode
*
* Updates the Multicast Mode of VF.
**/
-static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
- struct net_device *netdev, int xcast_mode)
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
@@ -513,6 +572,14 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
}
/**
+ * ixgbevf_hv_update_xcast_mode - Hyper-V variant; just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -551,6 +618,15 @@ mbx_err:
}
/**
+ * ixgbevf_hv_set_vfta_vf - Hyper-V variant; just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: Unused in this implementation
@@ -656,11 +732,72 @@ out:
}
/**
- * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * ixgbevf_hv_check_mac_link_vf - check link status; Hyper-V variant
+ * with no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+	/* for SFP+ modules and DA cables on 82599 it can take up to 500 usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ udelay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
@@ -670,11 +807,30 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
}
/**
- * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 reg;
+
+	/* There is no mailbox on Hyper-V, so the maximum receive
+	 * packet length is programmed directly into the ring's
+	 * RXDCTL register instead.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+	/* add 4 bytes to account for the CRC */
+	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+}
+
+/**
+ * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
* @hw: pointer to the HW structure
* @api: integer containing requested API version
**/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
int err;
u32 msg[3];
@@ -703,6 +859,21 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
return err;
}
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+ /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+ if (api != ixgbe_mbox_api_10)
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ return 0;
+}
+
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc)
{
@@ -769,11 +940,30 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.stop_adapter = ixgbevf_stop_hw_vf,
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_negotiate_api_version_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
+ .set_rlpml = ixgbevf_set_rlpml_vf,
+};
+
+static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_hv_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_hv_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
+ .set_rar = ixgbevf_hv_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
+ .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+ .set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
+ .set_vfta = ixgbevf_hv_set_vfta_vf,
+ .set_rlpml = ixgbevf_hv_set_rlpml_vf,
};
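
A minimal sketch of how callers consume these tables (the helper name is hypothetical; the dispatch itself mirrors the change_mtu hunk earlier in this patch):

	/* Dispatch through hw->mac.ops: the same call site resolves to
	 * ixgbevf_set_rlpml_vf on bare metal and to
	 * ixgbevf_hv_set_rlpml_vf once the Hyper-V ops table is installed.
	 * Assumes the driver's "vf.h" for struct ixgbe_hw.
	 */
	static void example_set_max_frame(struct ixgbe_hw *hw, u16 max_frame)
	{
		hw->mac.ops.set_rlpml(hw, max_frame);
	}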
const struct ixgbevf_info ixgbevf_82599_vf_info = {
@@ -781,17 +971,37 @@ const struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
+ .mac = ixgbe_mac_X540_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X550_vf_info = {
.mac = ixgbe_mac_X550_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
+ .mac = ixgbe_mac_X550_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
.mac = ixgbe_mac_X550EM_x_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
+ .mac = ixgbe_mac_X550EM_x_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
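
How probe chooses between the two table sets is not shown in these hunks; a hypothetical sketch, assuming the caller has already detected that it is running on Hyper-V:

	static const struct ixgbevf_info *
	example_select_info(bool running_on_hyperv)
	{
		return running_on_hyperv ? &ixgbevf_X550_vf_hv_info
					 : &ixgbevf_X550_vf_info;
	}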
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index ef9f773..2cac610 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -51,6 +51,7 @@ struct ixgbe_mac_operations {
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
+ s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -63,11 +64,12 @@ struct ixgbe_mac_operations {
s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ void (*set_rlpml)(struct ixgbe_hw *, u16);
};
enum ixgbe_mac_type {
@@ -207,8 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);