author    Linus Torvalds <torvalds@linux-foundation.org>  2011-09-18 18:02:26 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-09-18 18:02:26 (GMT)
commit    b0e7031ac08fa0aa242531c8d9a0cf9ae8ee276d (patch)
tree      86ae983c51b9df07ead6f00aeddbf276f672c839 /drivers
parent    01a7143586f51f80e1b29ebf240c6e5390657450 (diff)
parent    8e2ec639173f325977818c45011ee176ef2b11f6 (diff)
Merge git://github.com/davem330/net
* git://github.com/davem330/net: (62 commits)
  ipv6: don't use inetpeer to store metrics for routes.
  can: ti_hecc: include linux/io.h
  IRDA: Fix global type conflicts in net/irda/irsysctl.c v2
  net: Handle different key sizes between address families in flow cache
  net: Align AF-specific flowi structs to long
  ipv4: Fix fib_info->fib_metrics leak
  caif: fix a potential NULL dereference
  sctp: deal with multiple COOKIE_ECHO chunks
  ibmveth: Fix checksum offload failure handling
  ibmveth: Checksum offload is always disabled
  ibmveth: Fix issue with DMA mapping failure
  ibmveth: Fix DMA unmap error
  pch_gbe: support ML7831 IOH
  pch_gbe: added the process of FIFO over run error
  pch_gbe: fixed the issue which receives an unnecessary packet.
  sfc: Use 64-bit writes for TX push where possible
  Revert "sfc: Use write-combining to reduce TX latency" and follow-ups
  bnx2x: Fix ethtool advertisement
  bnx2x: Fix 578xx link LED
  bnx2x: Fix XMAC loopback test
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/Kconfig                               |  11
-rw-r--r--  drivers/net/bnx2x/bnx2x.h                         | 124
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c                     |  27
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c                 |  48
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c                    |  46
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c                    | 162
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h                     |   7
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c                   |   7
-rw-r--r--  drivers/net/can/ti_hecc.c                         |   1
-rw-r--r--  drivers/net/e1000/e1000_hw.c                      |   6
-rw-r--r--  drivers/net/ibmveth.c                             |  48
-rw-r--r--  drivers/net/pch_gbe/pch_gbe.h                     |  12
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c                | 302
-rw-r--r--  drivers/net/sfc/efx.c                             |  18
-rw-r--r--  drivers/net/sfc/io.h                              |   6
-rw-r--r--  drivers/net/sfc/mcdi.c                            |  46
-rw-r--r--  drivers/net/sfc/nic.c                             |   7
-rw-r--r--  drivers/net/sfc/nic.h                             |   2
-rw-r--r--  drivers/net/sfc/siena.c                           |  25
-rw-r--r--  drivers/net/sfc/workarounds.h                     |   2
-rw-r--r--  drivers/net/usb/ipheth.c                          |   5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c     |   3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c       |   2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c             |   6
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-rs.c       |  13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-ucode.c      |   2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c  |   2
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c               |   8
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c      |  11
29 files changed, 603 insertions, 356 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8d0314d..a44874e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2535,7 +2535,7 @@ config S6GMAC
source "drivers/net/stmmac/Kconfig"
config PCH_GBE
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+ tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
depends on PCI
select MII
---help---
@@ -2548,10 +2548,11 @@ config PCH_GBE
This driver enables Gigabit Ethernet function.
This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7223.
- ML7223 IOH is for MP(Media Phone) use.
- ML7223 is companion chip for Intel Atom E6xx series.
- ML7223 is completely compatible for Intel EG20T PCH.
+ Output Hub), ML7223/ML7831.
+ ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+ purpose use.
+ ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7223/ML7831 is completely compatible for Intel EG20T PCH.
config FTGMAC100
tristate "Faraday FTGMAC100 Gigabit Ethernet support"
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c423504..e46df53 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -315,6 +315,14 @@ union db_prod {
u32 raw;
};
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
+ ETH_MAX_AGGREGATION_QUEUES_E1 :\
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT 16
+#define DROPLESS_FC_HEADROOM 100
/* MC hsi */
#define BCM_PAGE_SHIFT 12
@@ -331,15 +339,35 @@ union db_prod {
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
+#define NEXT_PAGE_SGE_DESC_CNT 2
+#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
/* RX_SGE_CNT is promised to be a power of 2 */
#define RX_SGE_MASK (RX_SGE_CNT - 1)
#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
#define MAX_RX_SGE (NUM_RX_SGE - 1)
#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
- (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+ (MAX_RX_SGE_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+ (x) + 1)
#define RX_SGE(x) ((x) & MAX_RX_SGE)
+/*
+ * Number of required SGEs is the sum of two:
+ * 1. Number of possible opened aggregations (next packet for
+ * these aggregations will probably consume SGE immidiatelly)
+ * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
+ * after placement on BD for new TPA aggregation)
+ *
+ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
+ (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+ MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
+ NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
/* Manipulate a bit vector defined as an array of u64 */
/* Number of bits in one sge_mask array element */
@@ -551,24 +579,43 @@ struct bnx2x_fastpath {
#define NUM_TX_RINGS 16
#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+#define NEXT_PAGE_TX_DESC_CNT 1
+#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
#define MAX_TX_BD (NUM_TX_BD - 1)
#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
- (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+ (MAX_TX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+ (x) + 1)
#define TX_BD(x) ((x) & MAX_TX_BD)
#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
+#define NEXT_PAGE_RX_DESC_CNT 2
+#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
#define RX_DESC_MASK (RX_DESC_CNT - 1)
#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD (NUM_RX_BD - 1)
#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL 128
+
+/* dropless fc calculations for BDs
+ *
+ * Number of BDs should as number of buffers in BRB:
+ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ BRB_SIZE(bp)
+#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+ MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp) (NUM_BD_REQ + \
+ NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -579,7 +626,9 @@ struct bnx2x_fastpath {
MIN_RX_AVAIL))
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
- (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+ (MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+ (x) + 1)
#define RX_BD(x) ((x) & MAX_RX_BD)
/*
@@ -589,14 +638,31 @@ struct bnx2x_fastpath {
#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
+#define NEXT_PAGE_RCQ_DESC_CNT 1
+#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
- (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+ (MAX_RCQ_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+ (x) + 1)
#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
+/* dropless fc calculations for RCQs
+ *
+ * Number of RCQs should be as number of buffers in BRB:
+ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+ MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
+ NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
/* This is needed for determining of last_max */
#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
@@ -685,24 +751,17 @@ struct bnx2x_fastpath {
#define FP_CSB_FUNC_OFF \
offsetof(struct cstorm_status_block_c, func)
-#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */
- /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */
- /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */
- /* (HC_INDEX_U_ETH_RX_BD_CONS) */
-
-#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
- /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
+#define HC_INDEX_ETH_RX_CQ_CONS 1
-#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+#define HC_INDEX_OOO_TX_CQ_CONS 4
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
#define BNX2X_RX_SB_INDEX \
(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1100,11 +1159,12 @@ struct bnx2x {
#define BP_PORT(bp) (bp->pfid & 1)
#define BP_FUNC(bp) (bp->pfid)
#define BP_ABS_FUNC(bp) (bp->pf_num)
-#define BP_E1HVN(bp) (bp->pfid >> 1)
-#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/
-#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
- BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_VN(bp) ((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp) (BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
+ (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
struct net_device *dev;
struct pci_dev *pdev;
@@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MAX_DMAE_C_PER_PORT 8
#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
- BP_E1HVN(bp))
+ BP_VN(bp))
#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
E1HVN_MAX)
@@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
- (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+ (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
(x))
#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
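
A quick way to sanity-check the new dropless flow-control macros in bnx2x.h is to evaluate them numerically. The sketch below is a standalone user-space program, not driver code; the values chosen for ETH_MAX_AGGREGATION_QUEUES_E1H_E2, MAX_SPQ_PENDING and the per-page descriptor counts are illustrative assumptions rather than values taken from this patch.

/*
 * Standalone sketch of the dropless-FC threshold arithmetic from bnx2x.h.
 * The constants below are illustrative assumptions for an E2-style chip,
 * not authoritative driver values.
 */
#include <stdio.h>

int main(void)
{
	const int brb_size        = 512;  /* BRB_SIZE(bp) on a non-E3 chip */
	const int max_agg_qs      = 64;   /* assumed ETH_MAX_AGGREGATION_QUEUES_E1H_E2 */
	const int max_spq         = 8;    /* assumed MAX_SPQ_PENDING */
	const int headroom        = 100;  /* DROPLESS_FC_HEADROOM */
	const int max_rx_sge_cnt  = 4096 / 8 - 2;  /* page of 8-byte SGEs minus "next" descriptors */
	const int max_rx_desc_cnt = 4096 / 8 - 2;  /* page of 8-byte RX BDs minus "next" descriptors */

	int fw_drop_level = 3 + max_spq + max_agg_qs;  /* FW_DROP_LEVEL(bp) */

	/* SGE thresholds */
	int num_sge_req    = max_agg_qs + (brb_size - max_agg_qs) / 2;
	int num_sge_pg_req = (num_sge_req + max_rx_sge_cnt - 1) / max_rx_sge_cnt;
	int sge_th_lo      = num_sge_req + num_sge_pg_req * 2;
	int sge_th_hi      = sge_th_lo + headroom;

	/* RX BD thresholds */
	int num_bd_req    = brb_size;
	int num_bd_pg_req = (num_bd_req + max_rx_desc_cnt - 1) / max_rx_desc_cnt;
	int bd_th_lo      = num_bd_req + num_bd_pg_req * 2 + fw_drop_level;
	int bd_th_hi      = bd_th_lo + headroom;

	printf("SGE_TH_LO=%d SGE_TH_HI=%d\n", sge_th_lo, sge_th_hi);
	printf("BD_TH_LO=%d  BD_TH_HI=%d\n", bd_th_lo, bd_th_hi);
	return 0;
}

With those assumed constants the SGE thresholds come out around 290/390 and the BD thresholds around 591/691, which is the order of magnitude the new WARN_ON checks in bnx2x_main.c compare against the ring sizes.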
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 37e5790..c4cbf97 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
- int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
u16 ring_prod;
int i, j;
@@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
if (!fp->disable_tpa) {
/* Fill the per-aggregtion pool */
- for (i = 0; i < max_agg_queues; i++) {
+ for (i = 0; i < MAX_AGG_QS(bp); i++) {
struct bnx2x_agg_info *tpa_info =
&fp->tpa_info[i];
struct sw_rx_bd *first_buf =
@@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
bnx2x_free_rx_sge_range(bp, fp,
ring_prod);
bnx2x_free_tpa_pool(bp, fp,
- max_agg_queues);
+ MAX_AGG_QS(bp));
fp->disable_tpa = 1;
ring_prod = 0;
break;
@@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
bnx2x_free_rx_bds(fp);
if (!fp->disable_tpa)
- bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
- ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+ bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
}
}
@@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
struct bnx2x_fastpath *fp = &bp->fp[index];
int ring_size = 0;
u8 cos;
+ int rx_ring_size = 0;
/* if rx_ring_size specified - use it */
- int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
- MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+ if (!bp->rx_ring_size) {
- /* allocate at least number of buffers required by FW */
- rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
- MIN_RX_SIZE_TPA,
- rx_ring_size);
+ rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+ /* allocate at least number of buffers required by FW */
+ rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA, rx_ring_size);
+
+ bp->rx_ring_size = rx_ring_size;
+ } else
+ rx_ring_size = bp->rx_ring_size;
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 2218630..cf3e479 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
/* advertise the requested speed and duplex if supported */
- cmd->advertising &= bp->port.supported[cfg_idx];
+ if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+ DP(NETIF_MSG_LINK, "Advertisement parameters "
+ "are not supported\n");
+ return -EINVAL;
+ }
bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
- bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
- bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
+ bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
cmd->advertising);
+ if (cmd->advertising) {
+
+ bp->link_params.speed_cap_mask[cfg_idx] = 0;
+ if (cmd->advertising & ADVERTISED_10baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+ }
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+ if (cmd->advertising & ADVERTISED_100baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+ }
+ if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+ }
+ if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseKX_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+ if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_10000baseKR_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+ }
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
switch (speed) {
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
if (bp->rx_ring_size)
ering->rx_pending = bp->rx_ring_size;
else
- if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
- ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
- else
- ering->rx_pending = MAX_RX_AVAIL;
+ ering->rx_pending = MAX_RX_AVAIL;
ering->rx_mini_pending = 0;
ering->rx_jumbo_pending = 0;
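
The ethtool change above stops silently masking unsupported advertisement bits and instead rejects the request, then translates each accepted bit into the link-parameter speed-capability mask. A simplified sketch of that decision flow is shown below; the flag values are hypothetical stand-ins for the driver's real ADVERTISED_* and PORT_HW_CFG_SPEED_CAPABILITY_D0_* constants.

/* Illustrative sketch of the new advertisement handling; flag values are
 * hypothetical, not the driver's real constants. */
#include <stdio.h>
#include <errno.h>

#define ADV_1000_FULL   (1u << 0)
#define ADV_10000_FULL  (1u << 1)

#define CAP_1G   (1u << 0)   /* stands in for ..._SPEED_CAPABILITY_D0_1G */
#define CAP_10G  (1u << 1)   /* stands in for ..._SPEED_CAPABILITY_D0_10G */

static int set_autoneg_adv(unsigned int advertising, unsigned int supported,
			   unsigned int *speed_cap_mask)
{
	/* new behaviour: refuse anything the port cannot actually do */
	if (advertising & ~supported)
		return -EINVAL;

	*speed_cap_mask = 0;
	if (advertising & ADV_1000_FULL)
		*speed_cap_mask |= CAP_1G;
	if (advertising & ADV_10000_FULL)
		*speed_cap_mask |= CAP_10G;
	return 0;
}

int main(void)
{
	unsigned int mask;
	int rc;

	rc = set_autoneg_adv(ADV_10000_FULL, ADV_1000_FULL, &mask);
	printf("unsupported request -> %d (expect -EINVAL)\n", rc);

	rc = set_autoneg_adv(ADV_1000_FULL, ADV_1000_FULL | ADV_10000_FULL, &mask);
	printf("supported request   -> %d, cap mask 0x%x\n", rc, mask);
	return 0;
}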
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index d45b155..ba15bdc 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
{
u32 nig_reg_adress_crd_weight = 0;
u32 pbf_reg_adress_crd_weight = 0;
- /* Calculate and set BW for this COS*/
- const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
- const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+ /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+ const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
switch (cos_entry) {
case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
/* Calculate total BW requested */
for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
- if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
- "was set to 0\n");
- return -EINVAL;
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
}
- *total_bw +=
- ets_params->cos[cos_idx].params.bw_params.bw;
- }
}
- /*Check taotl BW is valid */
+ /* Check total BW is valid */
if ((100 != *total_bw) || (0 == *total_bw)) {
if (0 == *total_bw) {
DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* Check loopback mode */
if (lb)
- val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+ val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
bnx2x_set_xumac_nig(params,
((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+ /* Advertised and set FEC (Forward Error Correction) */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+ (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
/* Enable CL37 BAM */
if (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,
(tmp | EMAC_LED_OVERRIDE));
/*
* return here without enabling traffic
- * LED blink andsetting rate in ON mode.
+ * LED blink and setting rate in ON mode.
* In oper mode, enabling LED blink
* and setting rate is needed.
*/
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,
* This is a work-around for HW issue found when link
* is up in CL73
*/
- REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+ if ((!CHIP_IS_E3(bp)) ||
+ (CHIP_IS_E3(bp) &&
+ mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp) ||
(mode == LED_MODE_ON))
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_HW_LOCK_REQUIRED,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_INIT_XGXS_FIRST |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_INIT_XGXS_FIRST,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {
.addr = 0xff,
.def_md_devad = 0,
.flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_INIT_XGXS_FIRST |
- FLAGS_TX_ERROR_CHECK),
+ FLAGS_INIT_XGXS_FIRST),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_FAN_FAILURE_DET_REQ |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f74582a..c027e93 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
- opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
- (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+ (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
#ifdef __BIG_ENDIAN
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
/* init leading/trailing edge */
if (IS_MF(bp)) {
- val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
if (bp->port.pmf)
/* enable nig and gpio3 attention */
val |= 0x1100;
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
/* init leading/trailing edge */
if (IS_MF(bp)) {
- val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
if (bp->port.pmf)
/* enable nig and gpio3 attention */
val |= 0x1100;
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
int vn;
bp->vn_weight_sum = 0;
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
u32 vn_cfg = bp->mf_config[vn];
u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
struct rate_shaping_vars_per_vn m_rs_vn;
struct fairness_vars_per_vn m_fair_vn;
u32 vn_cfg = bp->mf_config[vn];
- int func = 2*vn + BP_PORT(bp);
+ int func = func_by_vn(bp, vn);
u16 vn_min_rate, vn_max_rate;
int i;
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
*
* and there are 2 functions per port
*/
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
if (func >= E1H_FUNC_MAX)
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
/* calculate and set min-max rate for each vn */
if (bp->port.pmf)
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
bnx2x_init_vn_minmax(bp, vn);
/* always enable rate shaping and fairness */
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
- int port = BP_PORT(bp);
int func;
int vn;
/* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- if (vn == BP_E1HVN(bp))
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
continue;
- func = ((vn << 1) | port);
+ func = func_by_vn(bp, vn);
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
(LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
}
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
bnx2x_dcbx_pmf_update(bp);
/* enable nig attention */
- val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xff0f | (1 << (BP_VN(bp) + 4)));
if (bp->common.int_block == INT_BLOCK_HC) {
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
u16 tpa_agg_size = 0;
if (!fp->disable_tpa) {
- pause->sge_th_hi = 250;
- pause->sge_th_lo = 150;
+ pause->sge_th_lo = SGE_TH_LO(bp);
+ pause->sge_th_hi = SGE_TH_HI(bp);
+
+ /* validate SGE ring has enough to cross high threshold */
+ WARN_ON(bp->dropless_fc &&
+ pause->sge_th_hi + FW_PREFETCH_CNT >
+ MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
tpa_agg_size = min_t(u32,
(min_t(u32, 8, MAX_SKB_FRAGS) *
SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* pause - not for e1 */
if (!CHIP_IS_E1(bp)) {
- pause->bd_th_hi = 350;
- pause->bd_th_lo = 250;
- pause->rcq_th_hi = 350;
- pause->rcq_th_lo = 250;
+ pause->bd_th_lo = BD_TH_LO(bp);
+ pause->bd_th_hi = BD_TH_HI(bp);
+
+ pause->rcq_th_lo = RCQ_TH_LO(bp);
+ pause->rcq_th_hi = RCQ_TH_HI(bp);
+ /*
+ * validate that rings have enough entries to cross
+ * high thresholds
+ */
+ WARN_ON(bp->dropless_fc &&
+ pause->bd_th_hi + FW_PREFETCH_CNT >
+ bp->rx_ring_size);
+ WARN_ON(bp->dropless_fc &&
+ pause->rcq_th_hi + FW_PREFETCH_CNT >
+ NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
pause->pri_map = 1;
}
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
* For PF Clients it should be the maximum avaliable number.
* VF driver(s) may want to define it to a smaller value.
*/
- rxq_init->max_tpa_queues =
- (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+ rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+ /* zero out state machine indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* map indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
hc_sm_p = sb_data_e2.common.state_machine;
sb_data_p = (u32 *)&sb_data_e2;
data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e2.index_data);
} else {
memset(&sb_data_e1x, 0,
sizeof(struct hc_status_block_data_e1x));
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
hc_sm_p = sb_data_e1x.common.state_machine;
sb_data_p = (u32 *)&sb_data_e1x;
data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
}
bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -5802,7 +5855,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* take the UNDI lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
bnx2x_reset_common(bp);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5814,7 +5867,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
}
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
@@ -6671,12 +6724,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
if (CHIP_MODE_IS_4_PORT(bp))
dsb_idx = BP_FUNC(bp);
else
- dsb_idx = BP_E1HVN(bp);
+ dsb_idx = BP_VN(bp);
prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
IGU_BC_BASE_DSB_PROD + dsb_idx :
IGU_NORM_BASE_DSB_PROD + dsb_idx);
+ /*
+ * igu prods come in chunks of E1HVN_MAX (4) -
+ * does not matters what is the current chip mode
+ */
for (i = 0; i < (num_segs * E1HVN_MAX);
i += E1HVN_MAX) {
addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7570,7 +7627,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
u32 val;
/* The mac address is written to entries 1-4 to
preserve entry 0 which is used by the PMF */
- u8 entry = (BP_E1HVN(bp) + 1)*8;
+ u8 entry = (BP_VN(bp) + 1)*8;
val = (mac_addr[0] << 8) | mac_addr[1];
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -8546,10 +8603,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
/* Check if there is any driver already loaded */
val = REG_RD(bp, MISC_REG_UNPREPARED);
if (val == 0x1) {
- /* Check if it is the UNDI driver
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ /*
+ * Check if it is the UNDI driver
* UNDI driver initializes CID offset for normal bell to 0x7
*/
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (val == 0x7) {
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8587,9 +8646,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
bnx2x_fw_command(bp, reset_code, 0);
}
- /* now it's safe to release the lock */
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
bnx2x_undi_int_disable(bp);
port = BP_PORT(bp);
@@ -8639,8 +8695,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
bp->fw_seq =
(SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
- } else
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ }
+
+ /* now it's safe to release the lock */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
}
@@ -8777,13 +8835,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
int pfid = BP_FUNC(bp);
- int vn = BP_E1HVN(bp);
int igu_sb_id;
u32 val;
u8 fid, igu_sb_cnt = 0;
bp->igu_base_sb = 0xff;
if (CHIP_INT_MODE_IS_BC(bp)) {
+ int vn = BP_VN(bp);
igu_sb_cnt = bp->igu_sb_cnt;
bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
FP_SB_MAX_E1x;
@@ -9416,6 +9474,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->igu_base_sb = 0;
} else {
bp->common.int_block = INT_BLOCK_IGU;
+
+ /* do not allow device reset during IGU info preocessing */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9447,6 +9509,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bnx2x_get_igu_cam_info(bp);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
/*
@@ -9473,7 +9536,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
- vn = BP_E1HVN(bp);
+ vn = BP_VN(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9593,13 +9656,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* port info */
bnx2x_get_port_hwinfo(bp);
- if (!BP_NOMCP(bp)) {
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- }
-
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
@@ -9765,6 +9821,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
+ /* init fw_seq after undi_unload! */
+ if (!BP_NOMCP(bp)) {
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ }
+
if (CHIP_REV_IS_FPGA(bp))
dev_err(&bp->pdev->dev, "FPGA detected\n");
@@ -10259,17 +10323,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
- /* Clean the following indirect addresses for all functions since it
+ /*
+ * Clean the following indirect addresses for all functions since it
* is not used by the driver.
*/
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+ if (CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
/*
* Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 40266c1..750e844 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -5320,7 +5320,7 @@
#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
#define XMAC_CTRL_REG_RX_EN (0x1<<1)
#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
#define XMAC_CTRL_REG_TX_EN (0x1<<0)
@@ -5766,7 +5766,7 @@
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
#define HW_LOCK_RESOURCE_SPIO 2
-#define HW_LOCK_RESOURCE_UNDI 5
+#define HW_LOCK_RESOURCE_RESET 5
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6853,6 +6853,9 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 771f680..9908f2bb 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
break;
case MAC_TYPE_NONE: /* unreached */
- BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+ DP(BNX2X_MSG_STATS,
+ "stats updated by DMAE but no MAC active\n");
return -1;
default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
- int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+ int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
u32 func_stx;
/* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
func_stx = bp->func_stx;
for (vn = VN_0; vn < vn_max; vn++) {
- int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+ int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
bnx2x_func_stats_init(bp);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index a812492..2adc294 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -46,6 +46,7 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8545c7a..a5a89ec 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
checksum += eeprom_data;
}
+#ifdef CONFIG_PARISC
+ /* This is a signature and not a checksum on HP c8000 */
+ if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+ return E1000_SUCCESS;
+
+#endif
if (checksum == (u16) EEPROM_SUM)
return E1000_SUCCESS;
else {
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 3e66792..8dd5fcc 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
struct ibmveth_adapter *adapter = netdev_priv(dev);
unsigned long set_attr, clr_attr, ret_attr;
unsigned long set_attr6, clr_attr6;
- long ret, ret6;
+ long ret, ret4, ret6;
int rc1 = 0, rc2 = 0;
int restart = 0;
@@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
set_attr = 0;
clr_attr = 0;
+ set_attr6 = 0;
+ clr_attr6 = 0;
if (data) {
set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
!(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
(ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
- ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
set_attr, &ret_attr);
- if (ret != H_SUCCESS) {
+ if (ret4 != H_SUCCESS) {
netdev_err(dev, "unable to change IPv4 checksum "
"offload settings. %d rc=%ld\n",
- data, ret);
+ data, ret4);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IP_CSUM;
- ret = h_illan_attributes(adapter->vdev->unit_address,
- set_attr, clr_attr, &ret_attr);
} else {
adapter->fw_ipv4_csum_support = data;
}
@@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (ret6 != H_SUCCESS) {
netdev_err(dev, "unable to change IPv6 checksum "
"offload settings. %d rc=%ld\n",
- data, ret);
+ data, ret6);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr6, clr_attr6, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IPV6_CSUM;
- ret = h_illan_attributes(adapter->vdev->unit_address,
- set_attr6, clr_attr6,
- &ret_attr);
} else
adapter->fw_ipv6_csum_support = data;
- if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+ if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
adapter->rx_csum = data;
else
rc1 = -EIO;
@@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
union ibmveth_buf_desc descs[6];
int last, i;
int force_bounce = 0;
+ dma_addr_t dma_addr;
/*
* veth handles a maximum of 6 segments including the header, so
@@ -994,17 +1004,16 @@ retry_bounce:
}
/* Map the header */
- descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
- skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto map_failed;
descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ descs[0].fields.address = dma_addr;
/* Map the frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long dma_addr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
@@ -1026,7 +1035,12 @@ retry_bounce:
netdev->stats.tx_bytes += skb->len;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+ for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
DMA_TO_DEVICE);
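
The ibmveth hunks above address two separate problems: the checksum-offload path clobbered its IPv4 return code (hence the new ret4 alongside ret6), and the transmit path unmapped the header, which was mapped with dma_map_single(), using dma_unmap_page(). A minimal sketch of the corrected map/unmap pairing, assuming a layout like the driver's descs[] array (header in slot 0, fragments after it), is:

/* Sketch only: mirrors the header-vs-fragment unmap pairing, with simple
 * address/length arrays standing in for ibmveth's descriptor fields. */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static void tx_unmap_all(struct device *dev, struct sk_buff *skb,
			 dma_addr_t *addr, unsigned int *len)
{
	int i;

	/* slot 0 was created with dma_map_single() -> undo with _single() */
	dma_unmap_single(dev, addr[0], len[0], DMA_TO_DEVICE);

	/* slots 1..nr_frags were created with dma_map_page() -> undo with _page() */
	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(dev, addr[i], len[i], DMA_TO_DEVICE);
}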
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index 59fac77..a09a0719 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
/* Reset */
#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
-#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
-#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
+#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
/* TCP/IP Accelerator Control */
#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
+/* RX DMA STATUS */
+#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
+
/* Wake On LAN Status */
#define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */
#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
struct pch_gbe_buffer {
struct sk_buff *skb;
dma_addr_t dma;
+ unsigned char *rx_buffer;
unsigned long time_stamp;
u16 length;
bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
struct pch_gbe_rx_ring {
struct pch_gbe_rx_desc *desc;
dma_addr_t dma;
+ unsigned char *rx_buff_pool;
+ dma_addr_t rx_buff_pool_logic;
+ unsigned int rx_buff_pool_size;
unsigned int size;
unsigned int count;
unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
unsigned long rx_buffer_len;
unsigned long tx_queue_len;
bool have_msi;
+ bool rx_stop_flag;
};
extern const char pch_driver_version[];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index eac3c5c..567ff10 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -20,7 +20,6 @@
#include "pch_gbe.h"
#include "pch_gbe_api.h"
-#include <linux/prefetch.h>
#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
+#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
+
#define PCH_GBE_TX_WEIGHT 64
#define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
)
/* Ethertype field values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
#define PCH_GBE_FRAME_SIZE_2048 2048
#define PCH_GBE_FRAME_SIZE_4096 4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_INT_ENABLE_MASK ( \
PCH_GBE_INT_RX_DMA_CMPLT | \
PCH_GBE_INT_RX_DSC_EMP | \
+ PCH_GBE_INT_RX_FIFO_ERR | \
PCH_GBE_INT_WOL_DET | \
PCH_GBE_INT_TX_CMPLT \
)
+#define PCH_GBE_INT_DISABLE_ALL 0
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
if (!tmp)
pr_err("Error: busy bit is not cleared\n");
}
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
+ * @reg: Pointer of register
+ * @busy: Busy bit
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+ u32 tmp;
+ int ret = -1;
+ /* wait busy */
+ tmp = 20;
+ while ((ioread32(reg) & bit) && --tmp)
+ udelay(5);
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+ else
+ ret = 0;
+ return ret;
+}
+
/**
* pch_gbe_mac_mar_set - Set MAC address register
* @hw: Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
return;
}
+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address. and store to the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+ pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+ /* Setup the MAC address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
/**
* pch_gbe_mac_init_rx_addrs - Initialize receive address's
* @hw: Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
tcpip = ioread32(&hw->reg->TCPIP_ACC);
- if (netdev->features & NETIF_F_RXCSUM) {
- tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
- tcpip |= PCH_GBE_RX_TCPIPACC_EN;
- } else {
- tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
- tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
- }
+ tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+ tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
iowrite32(tcpip, &hw->reg->TCPIP_ACC);
return;
}
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
iowrite32(rdba, &hw->reg->RX_DSC_BASE);
iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
- /* Enables Receive DMA */
- rxdma = ioread32(&hw->reg->DMA_CTRL);
- rxdma |= PCH_GBE_RX_DMA_EN;
- iowrite32(rxdma, &hw->reg->DMA_CTRL);
- /* Enables Receive */
- iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
}
/**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rxdma;
+ u16 value;
+ int ret;
+
+ /* Disable Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Wait Rx DMA BUS is IDLE */
+ ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+ if (ret) {
+ /* Disable Bus master */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+ value &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ /* Enable Bus master */
+ value |= PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ } else {
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ }
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+ u32 rxdma;
+
+ /* Enables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma |= PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Enables Receive */
+ iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+ return;
+}
+
/**
* pch_gbe_intr - Interrupt Handler
* @irq: Interrupt number
@@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
adapter->stats.intr_rx_frame_err_count++;
if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
- adapter->stats.intr_rx_fifo_err_count++;
+ if (!adapter->rx_stop_flag) {
+ adapter->stats.intr_rx_fifo_err_count++;
+ pr_debug("Rx fifo over run\n");
+ adapter->rx_stop_flag = true;
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+ &hw->reg->INT_EN);
+ pch_gbe_stop_receive(adapter);
+ }
if (int_st & PCH_GBE_INT_RX_DMA_ERR)
adapter->stats.intr_rx_dma_err_count++;
if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
/* When Rx descriptor is empty */
if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
adapter->stats.intr_rx_dsc_empty_count++;
- pr_err("Rx descriptor is empty\n");
+ pr_debug("Rx descriptor is empty\n");
int_en = ioread32(&hw->reg->INT_EN);
iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
if (hw->mac.tx_fc_enable) {
@@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
unsigned int i;
unsigned int bufsz;
- bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+ bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
while ((cleaned_count--)) {
buffer_info = &rx_ring->buffer_info[i];
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- } else {
- skb = netdev_alloc_skb(netdev, bufsz);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->stats.rx_alloc_buff_failed++;
- break;
- }
- /* 64byte align */
- skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->stats.rx_alloc_buff_failed++;
+ break;
}
+ /* align */
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = skb;
+
buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data,
+ buffer_info->rx_buffer,
buffer_info->length,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
return;
}
+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned int i;
+ unsigned int bufsz;
+ unsigned int size;
+
+ bufsz = adapter->rx_buffer_len;
+
+ size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+ rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic,
+ GFP_KERNEL);
+ if (!rx_ring->rx_buff_pool) {
+ pr_err("Unable to allocate memory for the receive poll buffer\n");
+ return -ENOMEM;
+ }
+ memset(rx_ring->rx_buff_pool, 0, size);
+ rx_ring->rx_buff_pool_size = size;
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+ buffer_info->length = bufsz;
+ }
+ return 0;
+}
+
/**
* pch_gbe_alloc_tx_buffers - Allocate transmit buffers
* @adapter: Board private structure
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
unsigned int i;
unsigned int cleaned_count = 0;
bool cleaned = false;
- struct sk_buff *skb, *new_skb;
+ struct sk_buff *skb;
u8 dma_status;
u16 gbec_status;
u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
rx_desc->gbec_status = DSC_INIT16;
buffer_info = &rx_ring->buffer_info[i];
skb = buffer_info->skb;
+ buffer_info->skb = NULL;
/* unmap dma */
dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length, DMA_FROM_DEVICE);
buffer_info->mapped = false;
- /* Prefetch the packet */
- prefetch(skb->data);
pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
"TCP:0x%08x] BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_err("Receive CRC Error\n");
} else {
/* get receive length */
- /* length convert[-3] */
- length = (rx_desc->rx_words_eob) - 3;
-
- /* Decide the data conversion method */
- if (!(netdev->features & NETIF_F_RXCSUM)) {
- /* [Header:14][payload] */
- if (NET_IP_ALIGN) {
- /* Because alignment differs,
- * the new_skb is newly allocated,
- * and data is copied to new_skb.*/
- new_skb = netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (!new_skb) {
- /* dorrop error */
- pr_err("New skb allocation "
- "Error\n");
- goto dorrop;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data, skb->data,
- length);
- skb = new_skb;
- } else {
- /* DMA buffer is used as SKB as it is.*/
- buffer_info->skb = NULL;
- }
- } else {
- /* [Header:14][padding:2][payload] */
- /* The length includes padding length */
- length = length - PCH_GBE_DMA_PADDING;
- if ((length < copybreak) ||
- (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
- /* Because alignment differs,
- * the new_skb is newly allocated,
- * and data is copied to new_skb.
- * Padding data is deleted
- * at the time of a copy.*/
- new_skb = netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (!new_skb) {
- /* dorrop error */
- pr_err("New skb allocation "
- "Error\n");
- goto dorrop;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data, skb->data,
- ETH_HLEN);
- memcpy(&new_skb->data[ETH_HLEN],
- &skb->data[ETH_HLEN +
- PCH_GBE_DMA_PADDING],
- length - ETH_HLEN);
- skb = new_skb;
- } else {
- /* Padding data is deleted
- * by moving header data.*/
- memmove(&skb->data[PCH_GBE_DMA_PADDING],
- &skb->data[0], ETH_HLEN);
- skb_reserve(skb, NET_IP_ALIGN);
- buffer_info->skb = NULL;
- }
- }
- /* The length includes FCS length */
- length = length - ETH_FCS_LEN;
+ /* length convert[-3], length includes FCS length */
+ length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+ if (rx_desc->rx_words_eob & 0x02)
+ length = length - 4;
+ /*
+ * buffer_info->rx_buffer: [Header:14][payload]
+ * skb->data: [Reserve:2][Header:14][payload]
+ */
+ memcpy(skb->data, buffer_info->rx_buffer, length);
+
/* update status of driver */
adapter->stats.rx_bytes += length;
adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_debug("Receive skb->ip_summed: %d length: %d\n",
skb->ip_summed, length);
}
-dorrop:
/* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
pr_err("Error: can't bring device up\n");
return err;
}
+ err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+ if (err) {
+ pr_err("Error: can't bring device up\n");
+ return err;
+ }
pch_gbe_alloc_tx_buffers(adapter, tx_ring);
pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
adapter->tx_queue_len = netdev->tx_queue_len;
+ pch_gbe_start_receive(&adapter->hw);
mod_timer(&adapter->watchdog_timer, jiffies);
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
pch_gbe_reset(adapter);
pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+ pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+ rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+ rx_ring->rx_buff_pool_logic = 0;
+ rx_ring->rx_buff_pool_size = 0;
+ rx_ring->rx_buff_pool = NULL;
}
/**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
int max_frame;
+ unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+ int err;
max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
else
- adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
- netdev->mtu = new_mtu;
- adapter->hw.mac.max_frame_size = max_frame;
+ adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
- if (netif_running(netdev))
- pch_gbe_reinit_locked(adapter);
- else
+ if (netif_running(netdev)) {
+ pch_gbe_down(adapter);
+ err = pch_gbe_up(adapter);
+ if (err) {
+ adapter->rx_buffer_len = old_rx_buffer_len;
+ pch_gbe_up(adapter);
+ return -ENOMEM;
+ } else {
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
+ } else {
pch_gbe_reset(adapter);
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
int work_done = 0;
bool poll_end_flag = false;
bool cleaned = false;
+ u32 int_en;
pr_debug("budget : %d\n", budget);
@@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
if (!netif_carrier_ok(netdev)) {
poll_end_flag = true;
} else {
- cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ int_en = ioread32(&adapter->hw.reg->INT_EN);
+ iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+ &adapter->hw.reg->INT_EN);
+ }
+ cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
if (cleaned)
work_done = budget;
@@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
.class_mask = (0xFFFF00)
},
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
/* required last entry */
{0}
};
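The pch_gbe MTU path above now tears the interface down and brings it back up with the new receive-buffer length, restoring the previous length (and bringing the device back up with it) if the fresh bring-up fails. A minimal standalone sketch of that rollback pattern follows; my_adapter, my_dev_down and my_dev_up are hypothetical stand-ins, not the driver's symbols.

/* Illustrative sketch only -- not part of the patch.  It shows the
 * down / resize / up-with-rollback pattern pch_gbe_change_mtu() uses.
 * All names below are hypothetical stand-ins. */
#include <stdio.h>

struct my_adapter {
	unsigned long rx_buffer_len;
};

static void my_dev_down(struct my_adapter *ad) { (void)ad; }

static int my_dev_up(struct my_adapter *ad)
{
	/* Pretend very large buffers cannot be allocated. */
	return ad->rx_buffer_len > 16384 ? -1 : 0;
}

static int my_change_buffer_len(struct my_adapter *ad, unsigned long new_len)
{
	unsigned long old_len = ad->rx_buffer_len;

	my_dev_down(ad);
	ad->rx_buffer_len = new_len;
	if (my_dev_up(ad)) {
		/* Bring the device back with the known-good length. */
		ad->rx_buffer_len = old_len;
		my_dev_up(ad);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct my_adapter ad = { .rx_buffer_len = 2048 };

	printf("resize to 8192:  %d (len now %lu)\n",
	       my_change_buffer_len(&ad, 8192), ad.rx_buffer_len);
	printf("resize to 65536: %d (len now %lu)\n",
	       my_change_buffer_len(&ad, 65536), ad.rx_buffer_len);
	return 0;
}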
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index faca764..b59abc7 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
- bool use_wc;
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
-
- /* bug22643: If SR-IOV is enabled then tx push over a write combined
- * mapping is unsafe. We need to disable write combining in this case.
- * MSI is unsupported when SR-IOV is enabled, and the firmware will
- * have removed the MSI capability. So write combining is safe if
- * there is an MSI capability.
- */
- use_wc = (!EFX_WORKAROUND_22643(efx) ||
- pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
- if (use_wc)
- efx->membase = ioremap_wc(efx->membase_phys,
- efx->type->mem_map_size);
- else
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
+ efx->membase = ioremap_nocache(efx->membase_phys,
+ efx->type->mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index cc97880..751d1ec 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
- wmb();
}
/* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
- rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
- rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- wmb();
}
#define efx_writeo_page(efx, value, reg, page) \
_efx_writeo_page(efx, value, \
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 3dd45ed..81a4253 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
return &nic_data->mcdi;
}
-static inline void
-efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
-}
-
-static inline void
-efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
-}
-
void efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
const u8 *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned pdu = MCDI_PDU(efx);
- unsigned doorbell = MCDI_DOORBELL(efx);
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
unsigned int i;
efx_dword_t hdr;
u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags);
- efx_mcdi_writed(efx, &hdr, pdu);
+ efx_writed(efx, &hdr, pdu);
for (i = 0; i < inlen; i += 4)
- efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
- pdu + 4 + i);
+ _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+
+ /* Ensure the payload is written out before the header */
+ wmb();
/* ring the doorbell with a distinctive value */
- EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
- efx_mcdi_writed(efx, &hdr, doorbell);
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
}
static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int pdu = MCDI_PDU(efx);
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
int i;
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
BUG_ON(outlen & 3 || outlen >= 0x100);
for (i = 0; i < outlen; i += 4)
- efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
+ *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
}
static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int time, finish;
unsigned int respseq, respcmd, error;
- unsigned int pdu = MCDI_PDU(efx);
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned int rc, spins;
efx_dword_t reg;
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = get_seconds();
- efx_mcdi_readd(efx, &reg, pdu);
+ rmb();
+ efx_readd(efx, &reg, pdu);
/* All 1's indicates that shared memory is in reset (and is
* not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
respseq, mcdi->seqno);
rc = EIO;
} else if (error) {
- efx_mcdi_readd(efx, &reg, pdu + 4);
+ efx_readd(efx, &reg, pdu + 4);
switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name) \
case MC_CMD_ERR_ ## name: \
@@ -222,21 +210,21 @@ out:
/* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = MCDI_REBOOT_FLAG(efx);
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
efx_dword_t reg;
uint32_t value;
if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
return false;
- efx_mcdi_readd(efx, &reg, addr);
+ efx_readd(efx, &reg, addr);
value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
if (value == 0)
return 0;
EFX_ZERO_DWORD(reg);
- efx_mcdi_writed(efx, &reg, addr);
+ efx_writed(efx, &reg, addr);
if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR;
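With the dedicated efx_mcdi_readd()/efx_mcdi_writed() helpers removed, the MCDI code above goes through the generic accessors at FR_CZ_MC_TREG_SMEM-relative offsets and places the ordering barriers itself: payload words first, then wmb(), then the doorbell. A rough userspace analogue of that ordering follows, using C11 release/acquire in place of the kernel barriers; the buffer layout and names are invented for illustration.

/* Illustrative sketch only -- not part of the patch.  The payload must be
 * visible before the "doorbell" that tells the other side to read it.
 * C11 release/acquire stands in for wmb()/rmb(); the shared-memory layout
 * below is hypothetical. */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static unsigned char pdu[64];   /* stand-in for the MC shared-memory PDU */
static atomic_uint doorbell;    /* stand-in for the doorbell register */

static void copyin(const void *buf, size_t len)
{
	memcpy(pdu, buf, len);
	/* Same role as the wmb() added in efx_mcdi_copyin(): publish the
	 * payload before ringing the doorbell. */
	atomic_store_explicit(&doorbell, 0x45789abc, memory_order_release);
}

static int poll_response(char *out, size_t len)
{
	if (atomic_load_explicit(&doorbell, memory_order_acquire) == 0)
		return -1;              /* nothing to read yet */
	memcpy(out, pdu, len);          /* payload is ordered before doorbell */
	return 0;
}

int main(void)
{
	char out[16] = "";

	copyin("MC_CMD_PING", 12);
	if (poll_response(out, sizeof(out)) == 0)
		printf("response payload: %s\n", out);
	return 0;
}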
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index bafa23a..3edfbaf 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
size = min_t(size_t, table->step, 16);
- if (table->offset >= efx->type->mem_map_size) {
- /* No longer mapped; return dummy data */
- memcpy(buf, "\xde\xc0\xad\xde", 4);
- buf += table->rows * size;
- continue;
- }
-
for (i = 0; i < table->rows; i++) {
switch (table->step) {
case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 4bd1f28..7443f99 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/**
* struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface
- * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
* @wol_filter_id: Wake-on-LAN packet filter id
*/
struct siena_nic_data {
struct efx_mcdi_iface mcdi;
- void __iomem *mcdi_smem;
int wol_filter_id;
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 5735e84..2c3bd93 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
- /* Initialise MCDI */
- nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
- FR_CZ_MC_TREG_SMEM,
- FR_CZ_MC_TREG_SMEM_STEP *
- FR_CZ_MC_TREG_SMEM_ROWS);
- if (!nic_data->mcdi_smem) {
- netif_err(efx, probe, efx->net_dev,
- "could not map MCDI at %llx+%x\n",
- (unsigned long long)efx->membase_phys +
- FR_CZ_MC_TREG_SMEM,
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
- rc = -ENOMEM;
- goto fail1;
- }
efx_mcdi_init(efx);
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- goto fail2;
+ goto fail1;
/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
@@ -324,7 +310,6 @@ fail4:
fail3:
efx_mcdi_drv_attach(efx, false, NULL);
fail2:
- iounmap(nic_data->mcdi_smem);
fail1:
kfree(efx->nic_data);
return rc;
@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)
static void siena_remove_nic(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
-
efx_nic_free_buffer(efx, &efx->irq_status);
siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */
- iounmap(nic_data->mcdi_smem);
- kfree(nic_data);
+ kfree(efx->nic_data);
efx->nic_data = NULL;
}
@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
- .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
+ .mem_map_size = (FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 99ff114..e4dd3a7 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -38,8 +38,6 @@
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
-/* Write combining and sriov=enabled are incompatible */
-#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 15772b1..13c1f04 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
#define USB_PRODUCT_IPHONE_4 0x1297
+#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 2d4c091..2d394af 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
if (!IS_CHAN_B(chan) &&
- !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
+ !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+ IS_CHAN_HT20(chan)))
supported = true;
break;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1baca8e..fcafec0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesAdditional,
modesIndex, regWrites);
- if (AR_SREV_9300(ah))
+ if (AR_SREV_9330(ah))
REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6530694..722967b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
mutex_lock(&sc->mutex);
cancel_delayed_work_sync(&sc->tx_complete_work);
+ if (ah->ah_flags & AH_UNPLUGGED) {
+ ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+ mutex_unlock(&sc->mutex);
+ return;
+ }
+
if (sc->sc_flags & SC_OP_INVALID) {
ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 977bd24..164bcae 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
out:
- rs_sta->last_txrate_idx = index;
- if (sband->band == IEEE80211_BAND_5GHZ)
- info->control.rates[0].idx = rs_sta->last_txrate_idx -
- IWL_FIRST_OFDM_RATE;
- else
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
+ index = IWL_FIRST_OFDM_RATE;
+ rs_sta->last_txrate_idx = index;
+ info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
+ } else {
+ rs_sta->last_txrate_idx = index;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
+ }
IWL_DEBUG_RATE(priv, "leave: %d\n", index);
}
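The iwl3945 change above clamps the selected rate index on the 5 GHz band before the OFDM offset is subtracted, so the resulting control index can never go negative. A small standalone sketch of the same guard follows, with simplified stand-in names and a hypothetical first-OFDM-rate constant.

/* Illustrative sketch only -- not part of the patch.  Mirrors the fix:
 * on 5 GHz, clamp to the first OFDM rate before subtracting the offset.
 * Names and the constant value are simplified stand-ins. */
#include <stdio.h>

#define FIRST_OFDM_RATE 4	/* hypothetical index of the first OFDM rate */

enum band { BAND_2GHZ, BAND_5GHZ };

static int control_rate_idx(enum band band, int index)
{
	if (band == BAND_5GHZ) {
		if (index < FIRST_OFDM_RATE) {
			fprintf(stderr, "warning: CCK index %d on 5 GHz, clamping\n",
				index);
			index = FIRST_OFDM_RATE;
		}
		return index - FIRST_OFDM_RATE;	/* OFDM-relative index */
	}
	return index;				/* 2.4 GHz uses the raw index */
}

int main(void)
{
	printf("5 GHz, index 7   -> %d\n", control_rate_idx(BAND_5GHZ, 7));
	printf("5 GHz, index 1   -> %d\n", control_rate_idx(BAND_5GHZ, 1));
	printf("2.4 GHz, index 1 -> %d\n", control_rate_idx(BAND_2GHZ, 1));
	return 0;
}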
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index a895a09..5621100 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
+ memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index a6b2b1d..222d410 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
+ txq->time_stamp = jiffies;
+
iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 1bdc1aa..04c4e9e 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_NOLINK;
memset(mac->bssid, 0, 6);
+
+ /* reset sec info */
+ rtl_cam_reset_sec_info(hw);
+
+ rtl_cam_reset_all_entry(hw);
mac->vendor = PEER_UNKNOWN;
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*or clear all entry here.
*/
rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+
+ rtl_cam_reset_sec_info(hw);
+
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 906e7aa..3e52a54 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (mac->bw_40) {
- if (tcb_desc->packet_bw) {
+ if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
SET_TX_DESC_DATA_BW(txdesc, 1);
SET_TX_DESC_DATA_SC(txdesc, 3);
+ } else if (rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ SET_TX_DESC_DATA_BW(txdesc, 1);
+ SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
} else {
SET_TX_DESC_DATA_BW(txdesc, 0);
- if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
- SET_TX_DESC_DATA_SC(txdesc,
- mac->cur_40_prime_sc);
- }
+ SET_TX_DESC_DATA_SC(txdesc, 0);
+ }
} else {
SET_TX_DESC_DATA_BW(txdesc, 0);
SET_TX_DESC_DATA_SC(txdesc, 0);