Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bcma/driver_chipcommon.c | 23
-rw-r--r--  drivers/bcma/driver_chipcommon_nflash.c | 3
-rw-r--r--  drivers/bcma/driver_chipcommon_pmu.c | 5
-rw-r--r--  drivers/bcma/driver_chipcommon_sflash.c | 35
-rw-r--r--  drivers/bcma/driver_mips.c | 48
-rw-r--r--  drivers/bcma/driver_pci_host.c | 14
-rw-r--r--  drivers/bcma/host_pci.c | 6
-rw-r--r--  drivers/bcma/main.c | 54
-rw-r--r--  drivers/bcma/sprom.c | 5
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 28
-rw-r--r--  drivers/dma/ioat/dca.c | 23
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNisar.c | 2
-rw-r--r--  drivers/isdn/hisax/callc.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc_pci.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc_sx.c | 2
-rw-r--r--  drivers/isdn/mISDN/l1oip_core.c | 5
-rw-r--r--  drivers/isdn/pcbit/layer2.c | 2
-rw-r--r--  drivers/net/can/Kconfig | 9
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/grcan.c | 1756
-rw-r--r--  drivers/net/can/janz-ican3.c | 4
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 1
-rw-r--r--  drivers/net/can/sja1000/plx_pci.c | 19
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 35
-rw-r--r--  drivers/net/ethernet/adi/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 259
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 132
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 460
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 87
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 29
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 843
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 59
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 12
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 276
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 8
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 1276
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.h | 112
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 544
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 56
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 59
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/common.h | 7
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 16
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 51
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 409
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 177
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 80
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 20
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 779
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 161
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 119
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 383
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 6
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 28
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 15
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 94
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 71
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 49
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 107
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 336
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 1464
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 55
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 158
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 73
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 104
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 278
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 124
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 469
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 394
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 61
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 13
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 16
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 14
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 93
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 16
-rw-r--r--  drivers/net/ethernet/realtek/atp.c | 58
-rw-r--r--  drivers/net/ethernet/realtek/atp.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 110
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 3
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 26
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 630
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 31
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 1
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 427
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 146
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 4
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 5
-rw-r--r--  drivers/net/irda/sh_irda.c | 4
-rw-r--r--  drivers/net/netconsole.c | 6
-rw-r--r--  drivers/net/phy/davicom.c | 6
-rw-r--r--  drivers/net/phy/mdio_bus.c | 14
-rw-r--r--  drivers/net/phy/smsc.c | 69
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 2
-rw-r--r--  drivers/net/tun.c | 840
-rw-r--r--  drivers/net/usb/Kconfig | 22
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/asix_common.c | 117
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 412
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 645
-rw-r--r--  drivers/net/usb/dm9601.c | 107
-rw-r--r--  drivers/net/usb/int51x1.c | 52
-rw-r--r--  drivers/net/usb/mcs7830.c | 85
-rw-r--r--  drivers/net/usb/net1080.c | 110
-rw-r--r--  drivers/net/usb/plusb.c | 11
-rw-r--r--  drivers/net/usb/sierra_net.c | 47
-rw-r--r--  drivers/net/usb/smsc75xx.c | 297
-rw-r--r--  drivers/net/usb/smsc95xx.c | 343
-rw-r--r--  drivers/net/usb/smsc95xx.h | 5
-rw-r--r--  drivers/net/usb/usbnet.c | 196
-rw-r--r--  drivers/net/veth.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 12
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 36
-rw-r--r--  drivers/net/vxlan.c | 5
-rw-r--r--  drivers/net/wan/Makefile | 4
-rw-r--r--  drivers/net/wan/wanxlfw.S | 1
-rw-r--r--  drivers/net/wireless/airo.c | 2
-rw-r--r--  drivers/net/wireless/ath/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/ar5523/Kconfig | 7
-rw-r--r--  drivers/net/wireless/ath/ar5523/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 1806
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.h | 152
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523_hw.h | 431
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_calib.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mci.c | 70
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mci.h | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/btcoex.c | 62
-rw-r--r--  drivers/net/wireless/ath/ath9k/btcoex.h | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 34
-rw-r--r--  drivers/net/wireless/ath/ath9k/gpio.c | 110
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/link.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/mci.c | 171
-rw-r--r--  drivers/net/wireless/ath/ath9k/mci.h | 36
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/wow.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/mac.c | 21
-rw-r--r--  drivers/net/wireless/ath/carl9170/rx.c | 51
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 7
-rw-r--r--  drivers/net/wireless/ath/hw.c | 20
-rw-r--r--  drivers/net/wireless/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/Makefile | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 73
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c | 29
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c | 453
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 198
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 174
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwil.c | 336
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwil.h | 39
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 41
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 1218
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h | 145
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/aiutils.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211_rx.c | 2
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw_rx.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 95
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 18
-rw-r--r--  drivers/net/wireless/libertas/mesh.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.c | 8
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 20
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 21
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 19
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 8
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 13
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 55
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmdresp.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 4
-rw-r--r--  drivers/net/wireless/mwifiex/sta_rx.c | 26
-rw-r--r--  drivers/net/wireless/mwifiex/txrx.c | 10
-rw-r--r--  drivers/net/wireless/mwifiex/uap_cmd.c | 11
-rw-r--r--  drivers/net/wireless/mwifiex/uap_txrx.c | 17
-rw-r--r--  drivers/net/wireless/mwifiex/util.c | 19
-rw-r--r--  drivers/net/wireless/orinoco/main.h | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_usb.c | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 100
-rw-r--r--  drivers/net/wireless/rtlwifi/cam.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c | 227
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c | 88
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/def.h | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/dm.c | 30
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 93
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/phy.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/rf.c | 23
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/sw.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.c | 46
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/dm.c | 30
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 16
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 25
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/rf.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 95
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/phy.c | 65
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/rf.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/trx.c | 39
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/dm.c | 89
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/hw.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/phy.c | 64
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/rf.c | 11
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 21
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 53
-rw-r--r--  drivers/nfc/Makefile | 2
-rw-r--r--  drivers/nfc/pn533.c | 18
-rw-r--r--  drivers/nfc/pn544/Makefile | 7
-rw-r--r--  drivers/nfc/pn544/i2c.c | 500
-rw-r--r--  drivers/nfc/pn544/pn544.c (renamed from drivers/nfc/pn544_hci.c) | 679
-rw-r--r--  drivers/nfc/pn544/pn544.h | 32
-rw-r--r--  drivers/pps/Kconfig | 1
-rw-r--r--  drivers/ptp/Kconfig | 19
-rw-r--r--  drivers/ptp/ptp_chardev.c | 32
-rw-r--r--  drivers/ssb/b43_pci_bridge.c | 1
-rw-r--r--  drivers/ssb/driver_chipcommon_pmu.c | 3
-rw-r--r--  drivers/ssb/driver_mipscore.c | 16
-rw-r--r--  drivers/vhost/net.c | 111
-rw-r--r--  drivers/vhost/tcm_vhost.c | 1
-rw-r--r--  drivers/vhost/vhost.c | 52
-rw-r--r--  drivers/vhost/vhost.h | 11
286 files changed, 18316 insertions, 8462 deletions
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index a4c3ebc..ffd74e5 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -22,12 +22,9 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
return value;
}
-void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
{
- u32 leddc_on = 10;
- u32 leddc_off = 90;
-
- if (cc->setup_done)
+ if (cc->early_setup_done)
return;
if (cc->core->id.rev >= 11)
@@ -36,6 +33,22 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
if (cc->core->id.rev >= 35)
cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);
+ if (cc->capabilities & BCMA_CC_CAP_PMU)
+ bcma_pmu_early_init(cc);
+
+ cc->early_setup_done = true;
+}
+
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
+{
+ u32 leddc_on = 10;
+ u32 leddc_off = 90;
+
+ if (cc->setup_done)
+ return;
+
+ bcma_core_chipcommon_early_init(cc);
+
if (cc->core->id.rev >= 20) {
bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0);
bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, 0);
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index 9042781..dbda91e 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -32,6 +32,9 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
}
cc->nflash.present = true;
+ if (cc->core->id.rev == 38 &&
+ (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT))
+ cc->nflash.boot = true;
/* Prepare platform device, but don't register it yet. It's too early,
* malloc (required by device_private_init) is not available yet. */
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 201faf1..a63ddd9 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -144,7 +144,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
}
}
-void bcma_pmu_init(struct bcma_drv_cc *cc)
+void bcma_pmu_early_init(struct bcma_drv_cc *cc)
{
u32 pmucap;
@@ -153,7 +153,10 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
cc->pmu.rev, pmucap);
+}
+void bcma_pmu_init(struct bcma_drv_cc *cc)
+{
if (cc->pmu.rev == 1)
bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
~BCMA_CC_PMU_CTL_NOILPONW);
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 2c4eec2..63e6883 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -12,7 +12,7 @@
static struct resource bcma_sflash_resource = {
.name = "bcma_sflash",
- .start = BCMA_SFLASH,
+ .start = BCMA_SOC_FLASH2,
.end = 0,
.flags = IORESOURCE_MEM | IORESOURCE_READONLY,
};
@@ -31,15 +31,42 @@ struct bcma_sflash_tbl_e {
};
static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
- { "", 0x14, 0x10000, 32, },
+ { "M25P20", 0x11, 0x10000, 4, },
+ { "M25P40", 0x12, 0x10000, 8, },
+
+ { "M25P16", 0x14, 0x10000, 32, },
+ { "M25P32", 0x14, 0x10000, 64, },
+ { "M25P64", 0x16, 0x10000, 128, },
+ { "M25FL128", 0x17, 0x10000, 256, },
{ 0 },
};
static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+ { "SST25WF512", 1, 0x1000, 16, },
+ { "SST25VF512", 0x48, 0x1000, 16, },
+ { "SST25WF010", 2, 0x1000, 32, },
+ { "SST25VF010", 0x49, 0x1000, 32, },
+ { "SST25WF020", 3, 0x1000, 64, },
+ { "SST25VF020", 0x43, 0x1000, 64, },
+ { "SST25WF040", 4, 0x1000, 128, },
+ { "SST25VF040", 0x44, 0x1000, 128, },
+ { "SST25VF040B", 0x8d, 0x1000, 128, },
+ { "SST25WF080", 5, 0x1000, 256, },
+ { "SST25VF080B", 0x8e, 0x1000, 256, },
+ { "SST25VF016", 0x41, 0x1000, 512, },
+ { "SST25VF032", 0x4a, 0x1000, 1024, },
+ { "SST25VF064", 0x4b, 0x1000, 2048, },
{ 0 },
};
static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+ { "AT45DB011", 0xc, 256, 512, },
+ { "AT45DB021", 0x14, 256, 1024, },
+ { "AT45DB041", 0x1c, 256, 2048, },
+ { "AT45DB081", 0x24, 256, 4096, },
+ { "AT45DB161", 0x2c, 512, 4096, },
+ { "AT45DB321", 0x34, 512, 8192, },
+ { "AT45DB642", 0x3c, 1024, 8192, },
{ 0 },
};
@@ -84,6 +111,8 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
break;
}
break;
+ case 0x13:
+ return -ENOTSUPP;
default:
for (e = bcma_sflash_st_tbl; e->name; e++) {
if (e->id == id)
@@ -116,7 +145,7 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
return -ENOTSUPP;
}
- sflash->window = BCMA_SFLASH;
+ sflash->window = BCMA_SOC_FLASH2;
sflash->blocksize = e->blocksize;
sflash->numblocks = e->numblocks;
sflash->size = sflash->blocksize * sflash->numblocks;
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index cc65b45..170822e 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -181,47 +181,66 @@ EXPORT_SYMBOL(bcma_cpu_clock);
static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus = mcore->core->bus;
+ struct bcma_drv_cc *cc = &bus->drv_cc;
- switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
+ switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
case BCMA_CC_FLASHT_ATSER:
bcma_debug(bus, "Found serial flash\n");
- bcma_sflash_init(&bus->drv_cc);
+ bcma_sflash_init(cc);
break;
case BCMA_CC_FLASHT_PARA:
bcma_debug(bus, "Found parallel flash\n");
- bus->drv_cc.pflash.window = 0x1c000000;
- bus->drv_cc.pflash.window_size = 0x02000000;
+ cc->pflash.present = true;
+ cc->pflash.window = BCMA_SOC_FLASH2;
+ cc->pflash.window_size = BCMA_SOC_FLASH2_SZ;
- if ((bcma_read32(bus->drv_cc.core, BCMA_CC_FLASH_CFG) &
+ if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
BCMA_CC_FLASH_CFG_DS) == 0)
- bus->drv_cc.pflash.buswidth = 1;
+ cc->pflash.buswidth = 1;
else
- bus->drv_cc.pflash.buswidth = 2;
+ cc->pflash.buswidth = 2;
break;
default:
bcma_err(bus, "Flash type not supported\n");
}
- if (bus->drv_cc.core->id.rev == 38 ||
+ if (cc->core->id.rev == 38 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
- if (bus->drv_cc.capabilities & BCMA_CC_CAP_NFLASH) {
+ if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
bcma_debug(bus, "Found NAND flash\n");
- bcma_nflash_init(&bus->drv_cc);
+ bcma_nflash_init(cc);
}
}
}
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+ struct bcma_bus *bus = mcore->core->bus;
+
+ if (mcore->early_setup_done)
+ return;
+
+ bcma_chipco_serial_init(&bus->drv_cc);
+ bcma_core_mips_flash_detect(mcore);
+
+ mcore->early_setup_done = true;
+}
+
void bcma_core_mips_init(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus;
struct bcma_device *core;
bus = mcore->core->bus;
+ if (mcore->setup_done)
+ return;
+
bcma_info(bus, "Initializing MIPS core...\n");
- if (!mcore->setup_done)
- mcore->assigned_irqs = 1;
+ bcma_core_mips_early_init(mcore);
+
+ mcore->assigned_irqs = 1;
/* Assign IRQs to all cores on the bus */
list_for_each_entry(core, &bus->cores, list) {
@@ -256,10 +275,5 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
bcma_info(bus, "IRQ reconfiguration done\n");
bcma_core_mips_dump_irq(bus);
- if (mcore->setup_done)
- return;
-
- bcma_chipco_serial_init(&bus->drv_cc);
- bcma_core_mips_flash_detect(mcore);
mcore->setup_done = true;
}
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 9baf886..e564495 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -35,11 +35,6 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
chipid_top != 0x5300)
return false;
- if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
- bcma_info(bus, "This PCI core is disabled and not working\n");
- return false;
- }
-
bcma_core_enable(pc->core, 0);
return !mips_busprobe32(tmp, pc->core->io_addr);
@@ -396,6 +391,11 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
bcma_info(bus, "PCIEcore in host mode found\n");
+ if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
+ bcma_info(bus, "This PCIE core is disabled and not working\n");
+ return;
+ }
+
pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
if (!pc_host) {
bcma_err(bus, "can not allocate memory");
@@ -452,6 +452,8 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
+ pc_host->io_resource.start = 0x100;
+ pc_host->io_resource.end = 0x47F;
pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
tmp | BCMA_SOC_PCI_MEM);
@@ -459,6 +461,8 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
+ pc_host->io_resource.start = 0x480;
+ pc_host->io_resource.end = 0x7FF;
pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index b6b4b5e..98fdc3e 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -238,7 +238,7 @@ static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int bcma_host_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -261,11 +261,11 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
bcma_host_pci_resume);
#define BCMA_PM_OPS (&bcma_pm_ops)
-#else /* CONFIG_PM */
+#else /* CONFIG_PM_SLEEP */
#define BCMA_PM_OPS NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index d865470..a971889 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -81,6 +81,18 @@ struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
}
EXPORT_SYMBOL_GPL(bcma_find_core);
+static struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit)
+{
+ struct bcma_device *core;
+
+ list_for_each_entry(core, &bus->cores, list) {
+ if (core->id.id == coreid && core->core_unit == unit)
+ return core;
+ }
+ return NULL;
+}
+
static void bcma_release_core_dev(struct device *dev)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
@@ -183,6 +195,20 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
return -1;
}
+ /* Early init CC core */
+ core = bcma_find_core(bus, bcma_cc_core_id(bus));
+ if (core) {
+ bus->drv_cc.core = core;
+ bcma_core_chipcommon_early_init(&bus->drv_cc);
+ }
+
+ /* Try to get SPROM */
+ err = bcma_sprom_get(bus);
+ if (err == -ENOENT) {
+ bcma_err(bus, "No SPROM available\n");
+ } else if (err)
+ bcma_err(bus, "Failed to get SPROM: %d\n", err);
+
/* Init CC core */
core = bcma_find_core(bus, bcma_cc_core_id(bus));
if (core) {
@@ -198,10 +224,17 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
}
/* Init PCIE core */
- core = bcma_find_core(bus, BCMA_CORE_PCIE);
+ core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
if (core) {
- bus->drv_pci.core = core;
- bcma_core_pci_init(&bus->drv_pci);
+ bus->drv_pci[0].core = core;
+ bcma_core_pci_init(&bus->drv_pci[0]);
+ }
+
+ /* Init PCIE core */
+ core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
+ if (core) {
+ bus->drv_pci[1].core = core;
+ bcma_core_pci_init(&bus->drv_pci[1]);
}
/* Init GBIT MAC COMMON core */
@@ -211,13 +244,6 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
}
- /* Try to get SPROM */
- err = bcma_sprom_get(bus);
- if (err == -ENOENT) {
- bcma_err(bus, "No SPROM available\n");
- } else if (err)
- bcma_err(bus, "Failed to get SPROM: %d\n", err);
-
/* Register found cores */
bcma_register_cores(bus);
@@ -275,18 +301,18 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
return -1;
}
- /* Init CC core */
+ /* Early init CC core */
core = bcma_find_core(bus, bcma_cc_core_id(bus));
if (core) {
bus->drv_cc.core = core;
- bcma_core_chipcommon_init(&bus->drv_cc);
+ bcma_core_chipcommon_early_init(&bus->drv_cc);
}
- /* Init MIPS core */
+ /* Early init MIPS core */
core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
if (core) {
bus->drv_mips.core = core;
- bcma_core_mips_init(&bus->drv_mips);
+ bcma_core_mips_early_init(&bus->drv_mips);
}
bcma_info(bus, "Early bus registered\n");
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 0d546b6..4adf9ef 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -595,8 +595,11 @@ int bcma_sprom_get(struct bcma_bus *bus)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
err = bcma_sprom_valid(sprom);
- if (err)
+ if (err) {
+ bcma_warn(bus, "invalid sprom read from the PCIe card, try to use fallback sprom\n");
+ err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
goto out;
+ }
bcma_sprom_extract_r8(bus, sprom);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 3f4bfc8..9959d4c 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -492,7 +492,7 @@ done:
static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
{
u16 buf_len = 0;
- int ret, buf_block_len, blksz;
+ int ret, num_blocks, blksz;
struct sk_buff *skb = NULL;
u32 type;
u8 *payload = NULL;
@@ -514,18 +514,17 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
}
blksz = SDIO_BLOCK_SIZE;
- buf_block_len = (buf_len + blksz - 1) / blksz;
+ num_blocks = DIV_ROUND_UP(buf_len, blksz);
if (buf_len <= SDIO_HEADER_LEN
- || (buf_block_len * blksz) > ALLOC_BUF_SIZE) {
+ || (num_blocks * blksz) > ALLOC_BUF_SIZE) {
BT_ERR("invalid packet length: %d", buf_len);
ret = -EINVAL;
goto exit;
}
/* Allocate buffer */
- skb = bt_skb_alloc(buf_block_len * blksz + BTSDIO_DMA_ALIGN,
- GFP_ATOMIC);
+ skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
goto exit;
@@ -541,7 +540,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
payload = skb->data;
ret = sdio_readsb(card->func, payload, card->ioport,
- buf_block_len * blksz);
+ num_blocks * blksz);
if (ret < 0) {
BT_ERR("readsb failed: %d", ret);
ret = -EIO;
@@ -553,7 +552,16 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
*/
buf_len = payload[0];
- buf_len |= (u16) payload[1] << 8;
+ buf_len |= payload[1] << 8;
+ buf_len |= payload[2] << 16;
+
+ if (buf_len > blksz * num_blocks) {
+ BT_ERR("Skip incorrect packet: hdrlen %d buffer %d",
+ buf_len, blksz * num_blocks);
+ ret = -EIO;
+ goto exit;
+ }
+
type = payload[3];
switch (type) {
@@ -589,8 +597,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
default:
BT_ERR("Unknown packet type:%d", type);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, payload,
- blksz * buf_block_len);
+ BT_ERR("hex: %*ph", blksz * num_blocks, payload);
kfree_skb(skb);
skb = NULL;
@@ -849,8 +856,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
if (ret < 0) {
i++;
BT_ERR("i=%d writesb failed: %d", i, ret);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- payload, nb);
+ BT_ERR("hex: %*ph", nb, payload);
ret = -EIO;
if (i > MAX_WRITE_IOMEM_RETRY)
goto exit;
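
[Editor's note] The btmrvl_sdio hunks above replace the open-coded round-up with DIV_ROUND_UP() and widen the length field read from the SDIO header to three bytes, dropping packets whose claimed length exceeds what was actually read. A minimal, self-contained C sketch of that block-sizing and validation logic follows; the block size and buffer cap are illustrative values, not the driver's.

#include <stdio.h>

/* Same arithmetic as the kernel's DIV_ROUND_UP() macro. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define SDIO_BLOCK_SIZE		64		/* assumed block size */
#define ALLOC_BUF_SIZE		(16 * 1024)	/* assumed upper bound */

/* Number of bytes to read for a payload of buf_len bytes, rounded up to whole
 * SDIO blocks, or 0 if the rounded size cannot fit the receive buffer
 * (mirrors the check done before bt_skb_alloc() in the patch).
 */
static unsigned int rx_read_size(unsigned int buf_len)
{
	unsigned int num_blocks = DIV_ROUND_UP(buf_len, SDIO_BLOCK_SIZE);

	if (buf_len == 0 || num_blocks * SDIO_BLOCK_SIZE > ALLOC_BUF_SIZE)
		return 0;
	return num_blocks * SDIO_BLOCK_SIZE;
}

int main(void)
{
	/* A 130-byte packet needs 3 blocks of 64 bytes: prints 192. */
	printf("%u\n", rx_read_size(130));
	return 0;
}
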
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index abd9038..d666807 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -604,6 +604,23 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
return slots;
}
+static inline int dca3_tag_map_invalid(u8 *tag_map)
+{
+ /*
+ * If the tag map is not programmed by the BIOS the default is:
+ * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
+ *
+ * This an invalid map and will result in only 2 possible tags
+ * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that
+ * this entire definition is invalid.
+ */
+ return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
+ (tag_map[1] == DCA_TAG_MAP_VALID) &&
+ (tag_map[2] == DCA_TAG_MAP_VALID) &&
+ (tag_map[3] == DCA_TAG_MAP_VALID) &&
+ (tag_map[4] == DCA_TAG_MAP_VALID));
+}
+
struct dca_provider * __devinit
ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
@@ -674,6 +691,12 @@ ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
}
+ if (dca3_tag_map_invalid(ioatdca->tag_map)) {
+ dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
+ free_dca_provider(dca);
+ return NULL;
+ }
+
err = register_dca_provider(dca, &pdev->dev);
if (err) {
free_dca_provider(dca);
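
[Editor's note] A compact userspace rendering of the tag-map sanity check added above. DCA_TAG_MAP_VALID is assumed to be 0x80 here for illustration; the real definition lives elsewhere in the driver.

#include <stdbool.h>
#include <stdint.h>

#define DCA_TAG_MAP_VALID	0x80	/* assumed value */

/* The unprogrammed BIOS default map is 0x80 0x80 0x80 0x80 0x80 0x00 0x00
 * 0x00, which can only produce the tags 0x1f and the invalid tag 0x00, so a
 * map whose first five entries are all "valid" markers is rejected.
 */
static bool tag_map_is_bios_default(const uint8_t *tag_map)
{
	int i;

	for (i = 0; i < 5; i++)
		if (tag_map[i] != DCA_TAG_MAP_VALID)
			return false;
	return true;
}
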
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 81363ff..6e99d73 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -490,7 +490,7 @@ receive_dmsg(struct hfc_pci *hc)
(df->data[le16_to_cpu(zp->z1)])) {
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG
- "empty_fifo hfcpci paket inv. len "
+ "empty_fifo hfcpci packet inv. len "
"%d or crc %d\n",
rcnt,
df->data[le16_to_cpu(zp->z1)]);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 182ecf0..feafa91 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1302,7 +1302,7 @@ modeisar(struct isar_ch *ch, u32 bprotocol)
&ch->is->Flags))
ch->dpath = 1;
else {
- pr_info("modeisar both pathes in use\n");
+ pr_info("modeisar both paths in use\n");
return -EBUSY;
}
if (bprotocol == ISDN_P_B_HDLC)
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index a47637b..ddec47a 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -35,7 +35,7 @@ static int chancount;
/* experimental REJECT after ALERTING for CALLBACK to beat the 4s delay */
#define ALERT_REJECT 0
-/* Value to delay the sending of the first B-channel paket after CONNECT
+/* Value to delay the sending of the first B-channel packet after CONNECT
* here is no value given by ITU, but experience shows that 300 ms will
* work on many networks, if you or your other side is behind local exchanges
* a greater value may be recommented. If the delay is to short the first paket
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 334fa90..f60d4be 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -354,7 +354,7 @@ receive_dmsg(struct IsdnCardState *cs)
if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
(df->data[zp->z1])) {
if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "empty_fifo hfcpci paket inv. len %d or crc %d", rcnt, df->data[zp->z1]);
+ debugl1(cs, "empty_fifo hfcpci packet inv. len %d or crc %d", rcnt, df->data[zp->z1]);
#ifdef ERROR_STATISTIC
cs->err_rx++;
#endif
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 4db846b..4ec279c 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -270,7 +270,7 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
if ((count > fifo_size) || (count < 4)) {
if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcsx_read_fifo %d paket inv. len %d ", fifo , count);
+ debugl1(cs, "hfcsx_read_fifo %d packet inv. len %d ", fifo , count);
while (count) {
count--; /* empty fifo */
Read_hfc(cs, HFCSX_FIF_DRD);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index db50f78..f8e405c 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -277,7 +277,6 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
u16 timebase, u8 *buf, int len)
{
u8 *p;
- int multi = 0;
u8 frame[len + 32];
struct socket *socket = NULL;
@@ -317,9 +316,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
*p++ = hc->id >> 8;
*p++ = hc->id;
}
- *p++ = (multi == 1) ? 0x80 : 0x00 + channel; /* m-flag, channel */
- if (multi == 1)
- *p++ = len; /* length */
+ *p++ = 0x00 + channel; /* m-flag, channel */
*p++ = timebase >> 8; /* time base */
*p++ = timebase;
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index a18e639..42ecfef 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -508,7 +508,7 @@ pcbit_irq_handler(int interrupt, void *devptr)
return IRQ_NONE;
}
if (dev->interrupt) {
- printk(KERN_DEBUG "pcbit: reentering interrupt hander\n");
+ printk(KERN_DEBUG "pcbit: reentering interrupt handler\n");
return IRQ_HANDLED;
}
dev->interrupt = 1;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index bb709fd..b56bd9e 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -110,6 +110,15 @@ config PCH_CAN
is an IOH for x86 embedded processor (Intel Atom E6xx series).
This driver can access CAN bus.
+config CAN_GRCAN
+ tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
+ depends on CAN_DEV && OF
+ ---help---
+ Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
+ Note that the driver supports little endian, even though little
+ endian syntheses of the cores would need some modifications on
+ the hardware level to work.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 938be37..7de5986 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -22,5 +22,6 @@ obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
+obj-$(CONFIG_CAN_GRCAN) += grcan.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
new file mode 100644
index 0000000..391f484
--- /dev/null
+++ b/drivers/net/can/grcan.c
@@ -0,0 +1,1756 @@
+/*
+ * Socket CAN driver for Aeroflex Gaisler GRCAN and GRHCAN.
+ *
+ * 2012 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRCAN and GRHCAN CAN controllers available in the GRLIB
+ * VHDL IP core library.
+ *
+ * Full documentation of the GRCAN core can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * See "Documentation/devicetree/bindings/net/can/grcan.txt" for information on
+ * open firmware properties.
+ *
+ * See "Documentation/ABI/testing/sysfs-class-net-grcan" for information on the
+ * sysfs interface.
+ *
+ * See "Documentation/kernel-parameters.txt" for information on the module
+ * parameters.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Andreas Larsson <andreas@gaisler.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/can/dev.h>
+#include <linux/spinlock.h>
+
+#include <linux/of_platform.h>
+#include <asm/prom.h>
+
+#include <linux/of_irq.h>
+
+#include <linux/dma-mapping.h>
+
+#define DRV_NAME "grcan"
+
+#define GRCAN_NAPI_WEIGHT 32
+
+#define GRCAN_RESERVE_SIZE(slot1, slot2) (((slot2) - (slot1)) / 4 - 1)
+
+struct grcan_registers {
+ u32 conf; /* 0x00 */
+ u32 stat; /* 0x04 */
+ u32 ctrl; /* 0x08 */
+ u32 __reserved1[GRCAN_RESERVE_SIZE(0x08, 0x18)];
+ u32 smask; /* 0x18 - CanMASK */
+ u32 scode; /* 0x1c - CanCODE */
+ u32 __reserved2[GRCAN_RESERVE_SIZE(0x1c, 0x100)];
+ u32 pimsr; /* 0x100 */
+ u32 pimr; /* 0x104 */
+ u32 pisr; /* 0x108 */
+ u32 pir; /* 0x10C */
+ u32 imr; /* 0x110 */
+ u32 picr; /* 0x114 */
+ u32 __reserved3[GRCAN_RESERVE_SIZE(0x114, 0x200)];
+ u32 txctrl; /* 0x200 */
+ u32 txaddr; /* 0x204 */
+ u32 txsize; /* 0x208 */
+ u32 txwr; /* 0x20C */
+ u32 txrd; /* 0x210 */
+ u32 txirq; /* 0x214 */
+ u32 __reserved4[GRCAN_RESERVE_SIZE(0x214, 0x300)];
+ u32 rxctrl; /* 0x300 */
+ u32 rxaddr; /* 0x304 */
+ u32 rxsize; /* 0x308 */
+ u32 rxwr; /* 0x30C */
+ u32 rxrd; /* 0x310 */
+ u32 rxirq; /* 0x314 */
+ u32 rxmask; /* 0x318 */
+ u32 rxcode; /* 0x31C */
+};
+
+#define GRCAN_CONF_ABORT 0x00000001
+#define GRCAN_CONF_ENABLE0 0x00000002
+#define GRCAN_CONF_ENABLE1 0x00000004
+#define GRCAN_CONF_SELECT 0x00000008
+#define GRCAN_CONF_SILENT 0x00000010
+#define GRCAN_CONF_SAM 0x00000020 /* Available in some hardware */
+#define GRCAN_CONF_BPR 0x00000300 /* Note: not BRP */
+#define GRCAN_CONF_RSJ 0x00007000
+#define GRCAN_CONF_PS1 0x00f00000
+#define GRCAN_CONF_PS2 0x000f0000
+#define GRCAN_CONF_SCALER 0xff000000
+#define GRCAN_CONF_OPERATION \
+ (GRCAN_CONF_ABORT | GRCAN_CONF_ENABLE0 | GRCAN_CONF_ENABLE1 \
+ | GRCAN_CONF_SELECT | GRCAN_CONF_SILENT | GRCAN_CONF_SAM)
+#define GRCAN_CONF_TIMING \
+ (GRCAN_CONF_BPR | GRCAN_CONF_RSJ | GRCAN_CONF_PS1 \
+ | GRCAN_CONF_PS2 | GRCAN_CONF_SCALER)
+
+#define GRCAN_CONF_RSJ_MIN 1
+#define GRCAN_CONF_RSJ_MAX 4
+#define GRCAN_CONF_PS1_MIN 1
+#define GRCAN_CONF_PS1_MAX 15
+#define GRCAN_CONF_PS2_MIN 2
+#define GRCAN_CONF_PS2_MAX 8
+#define GRCAN_CONF_SCALER_MIN 0
+#define GRCAN_CONF_SCALER_MAX 255
+#define GRCAN_CONF_SCALER_INC 1
+
+#define GRCAN_CONF_BPR_BIT 8
+#define GRCAN_CONF_RSJ_BIT 12
+#define GRCAN_CONF_PS1_BIT 20
+#define GRCAN_CONF_PS2_BIT 16
+#define GRCAN_CONF_SCALER_BIT 24
+
+#define GRCAN_STAT_PASS 0x000001
+#define GRCAN_STAT_OFF 0x000002
+#define GRCAN_STAT_OR 0x000004
+#define GRCAN_STAT_AHBERR 0x000008
+#define GRCAN_STAT_ACTIVE 0x000010
+#define GRCAN_STAT_RXERRCNT 0x00ff00
+#define GRCAN_STAT_TXERRCNT 0xff0000
+
+#define GRCAN_STAT_ERRCTR_RELATED (GRCAN_STAT_PASS | GRCAN_STAT_OFF)
+
+#define GRCAN_STAT_RXERRCNT_BIT 8
+#define GRCAN_STAT_TXERRCNT_BIT 16
+
+#define GRCAN_STAT_ERRCNT_WARNING_LIMIT 96
+#define GRCAN_STAT_ERRCNT_PASSIVE_LIMIT 127
+
+#define GRCAN_CTRL_RESET 0x2
+#define GRCAN_CTRL_ENABLE 0x1
+
+#define GRCAN_TXCTRL_ENABLE 0x1
+#define GRCAN_TXCTRL_ONGOING 0x2
+#define GRCAN_TXCTRL_SINGLE 0x4
+
+#define GRCAN_RXCTRL_ENABLE 0x1
+#define GRCAN_RXCTRL_ONGOING 0x2
+
+/* Relative offset of IRQ sources to AMBA Plug&Play */
+#define GRCAN_IRQIX_IRQ 0
+#define GRCAN_IRQIX_TXSYNC 1
+#define GRCAN_IRQIX_RXSYNC 2
+
+#define GRCAN_IRQ_PASS 0x00001
+#define GRCAN_IRQ_OFF 0x00002
+#define GRCAN_IRQ_OR 0x00004
+#define GRCAN_IRQ_RXAHBERR 0x00008
+#define GRCAN_IRQ_TXAHBERR 0x00010
+#define GRCAN_IRQ_RXIRQ 0x00020
+#define GRCAN_IRQ_TXIRQ 0x00040
+#define GRCAN_IRQ_RXFULL 0x00080
+#define GRCAN_IRQ_TXEMPTY 0x00100
+#define GRCAN_IRQ_RX 0x00200
+#define GRCAN_IRQ_TX 0x00400
+#define GRCAN_IRQ_RXSYNC 0x00800
+#define GRCAN_IRQ_TXSYNC 0x01000
+#define GRCAN_IRQ_RXERRCTR 0x02000
+#define GRCAN_IRQ_TXERRCTR 0x04000
+#define GRCAN_IRQ_RXMISS 0x08000
+#define GRCAN_IRQ_TXLOSS 0x10000
+
+#define GRCAN_IRQ_NONE 0
+#define GRCAN_IRQ_ALL \
+ (GRCAN_IRQ_PASS | GRCAN_IRQ_OFF | GRCAN_IRQ_OR \
+ | GRCAN_IRQ_RXAHBERR | GRCAN_IRQ_TXAHBERR \
+ | GRCAN_IRQ_RXIRQ | GRCAN_IRQ_TXIRQ \
+ | GRCAN_IRQ_RXFULL | GRCAN_IRQ_TXEMPTY \
+ | GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_RXSYNC \
+ | GRCAN_IRQ_TXSYNC | GRCAN_IRQ_RXERRCTR \
+ | GRCAN_IRQ_TXERRCTR | GRCAN_IRQ_RXMISS \
+ | GRCAN_IRQ_TXLOSS)
+
+#define GRCAN_IRQ_ERRCTR_RELATED (GRCAN_IRQ_RXERRCTR | GRCAN_IRQ_TXERRCTR \
+ | GRCAN_IRQ_PASS | GRCAN_IRQ_OFF)
+#define GRCAN_IRQ_ERRORS (GRCAN_IRQ_ERRCTR_RELATED | GRCAN_IRQ_OR \
+ | GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR \
+ | GRCAN_IRQ_TXLOSS)
+#define GRCAN_IRQ_DEFAULT (GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_ERRORS)
+
+#define GRCAN_MSG_SIZE 16
+
+#define GRCAN_MSG_IDE 0x80000000
+#define GRCAN_MSG_RTR 0x40000000
+#define GRCAN_MSG_BID 0x1ffc0000
+#define GRCAN_MSG_EID 0x1fffffff
+#define GRCAN_MSG_IDE_BIT 31
+#define GRCAN_MSG_RTR_BIT 30
+#define GRCAN_MSG_BID_BIT 18
+#define GRCAN_MSG_EID_BIT 0
+
+#define GRCAN_MSG_DLC 0xf0000000
+#define GRCAN_MSG_TXERRC 0x00ff0000
+#define GRCAN_MSG_RXERRC 0x0000ff00
+#define GRCAN_MSG_DLC_BIT 28
+#define GRCAN_MSG_TXERRC_BIT 16
+#define GRCAN_MSG_RXERRC_BIT 8
+#define GRCAN_MSG_AHBERR 0x00000008
+#define GRCAN_MSG_OR 0x00000004
+#define GRCAN_MSG_OFF 0x00000002
+#define GRCAN_MSG_PASS 0x00000001
+
+#define GRCAN_MSG_DATA_SLOT_INDEX(i) (2 + (i) / 4)
+#define GRCAN_MSG_DATA_SHIFT(i) ((3 - (i) % 4) * 8)
+
+#define GRCAN_BUFFER_ALIGNMENT 1024
+#define GRCAN_DEFAULT_BUFFER_SIZE 1024
+#define GRCAN_VALID_TR_SIZE_MASK 0x001fffc0
+
+#define GRCAN_INVALID_BUFFER_SIZE(s) \
+ ((s) == 0 || ((s) & ~GRCAN_VALID_TR_SIZE_MASK))
+
+#if GRCAN_INVALID_BUFFER_SIZE(GRCAN_DEFAULT_BUFFER_SIZE)
+#error "Invalid default buffer size"
+#endif
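
/* Editor's illustration, not part of the patch: what the compile-time size
 * check above accepts. The reading of the mask below is the editor's
 * inference rather than a documented hardware limit.
 */
#define VALID_TR_SIZE_MASK	0x001fffc0
#define INVALID_BUFFER_SIZE(s)	((s) == 0 || ((s) & ~VALID_TR_SIZE_MASK))

_Static_assert(!INVALID_BUFFER_SIZE(0x400), "default size is accepted");
_Static_assert(INVALID_BUFFER_SIZE(0x30), "sizes not a multiple of 0x40 are rejected");
_Static_assert(INVALID_BUFFER_SIZE(0x200000), "sizes above the mask are rejected");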
+
+struct grcan_dma_buffer {
+ size_t size;
+ void *buf;
+ dma_addr_t handle;
+};
+
+struct grcan_dma {
+ size_t base_size;
+ void *base_buf;
+ dma_addr_t base_handle;
+ struct grcan_dma_buffer tx;
+ struct grcan_dma_buffer rx;
+};
+
+/* GRCAN configuration parameters */
+struct grcan_device_config {
+ unsigned short enable0;
+ unsigned short enable1;
+ unsigned short select;
+ unsigned int txsize;
+ unsigned int rxsize;
+};
+
+#define GRCAN_DEFAULT_DEVICE_CONFIG { \
+ .enable0 = 0, \
+ .enable1 = 0, \
+ .select = 0, \
+ .txsize = GRCAN_DEFAULT_BUFFER_SIZE, \
+ .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
+ }
+
+#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
+#define GRLIB_VERSION_MASK 0xffff
+
+/* GRCAN private data structure */
+struct grcan_priv {
+ struct can_priv can; /* must be the first member */
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ struct grcan_registers __iomem *regs; /* ioremap'ed registers */
+ struct grcan_device_config config;
+ struct grcan_dma dma;
+
+ struct sk_buff **echo_skb; /* We allocate this on our own */
+ u8 *txdlc; /* Length of queued frames */
+
+ /* The echo skb pointer, pointing into echo_skb and indicating which
+ * frames can be echoed back. See the "Notes on the tx cyclic buffer
+ * handling"-comment for grcan_start_xmit for more details.
+ */
+ u32 eskbp;
+
+ /* Lock for controlling changes to the netif tx queue state, accesses to
+ * the echo_skb pointer eskbp and for making sure that a running reset
+ * and/or a close of the interface is done without interference from
+ * other parts of the code.
+ *
+ * The echo_skb pointer, eskbp, should only be accessed under this lock
+ * as it can be changed in several places and together with decisions on
+ * whether to wake up the tx queue.
+ *
+ * The tx queue must never be woken up if there is a running reset or
+ * close in progress.
+ *
+ * A running reset (see below on need_txbug_workaround) should never be
+ * done if the interface is closing down and several running resets
+ * should never be scheduled simultaneously.
+ */
+ spinlock_t lock;
+
+ /* Whether a workaround is needed due to a bug in older hardware. In
+ * this case, the driver both tries to prevent the bug from being
+ * triggered and recovers, if the bug nevertheless happens, by doing a
+ * running reset. A running reset, resets the device and continues from
+ * where it were without being noticeable from outside the driver (apart
+ * from slight delays).
+ */
+ bool need_txbug_workaround;
+
+ /* To trigger initialization of running reset and to trigger running reset
+ * respectively in the case of a hung device due to a txbug.
+ */
+ struct timer_list hang_timer;
+ struct timer_list rr_timer;
+
+ /* To avoid waking up the netif queue and restarting timers
+ * when a reset is scheduled or when closing of the device is
+ * in progress
+ */
+ bool resetting;
+ bool closing;
+};
+
+/* Wait time for a short wait for ongoing to clear */
+#define GRCAN_SHORTWAIT_USECS 10
+
+/* Limit on the number of transmitted bits of an eff frame according to the CAN
+ * specification: 1 bit start of frame, 32 bits arbitration field, 6 bits
+ * control field, 8 bytes data field, 16 bits crc field, 2 bits ACK field and 7
+ * bits end of frame
+ */
+#define GRCAN_EFF_FRAME_MAX_BITS (1+32+6+8*8+16+2+7)
+
+#if defined(__BIG_ENDIAN)
+static inline u32 grcan_read_reg(u32 __iomem *reg)
+{
+ return ioread32be(reg);
+}
+
+static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
+{
+ iowrite32be(val, reg);
+}
+#else
+static inline u32 grcan_read_reg(u32 __iomem *reg)
+{
+ return ioread32(reg);
+}
+
+static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
+{
+ iowrite32(val, reg);
+}
+#endif
+
+static inline void grcan_clear_bits(u32 __iomem *reg, u32 mask)
+{
+ grcan_write_reg(reg, grcan_read_reg(reg) & ~mask);
+}
+
+static inline void grcan_set_bits(u32 __iomem *reg, u32 mask)
+{
+ grcan_write_reg(reg, grcan_read_reg(reg) | mask);
+}
+
+static inline u32 grcan_read_bits(u32 __iomem *reg, u32 mask)
+{
+ return grcan_read_reg(reg) & mask;
+}
+
+static inline void grcan_write_bits(u32 __iomem *reg, u32 value, u32 mask)
+{
+ u32 old = grcan_read_reg(reg);
+
+ grcan_write_reg(reg, (old & ~mask) | (value & mask));
+}
+
+/* a and b should both be in [0,size] and a == b == size should not hold */
+static inline u32 grcan_ring_add(u32 a, u32 b, u32 size)
+{
+ u32 sum = a + b;
+
+ if (sum < size)
+ return sum;
+ else
+ return sum - size;
+}
+
+/* a and b should both be in [0,size) */
+static inline u32 grcan_ring_sub(u32 a, u32 b, u32 size)
+{
+ return grcan_ring_add(a, size - b, size);
+}
+
+/* Available slots for new transmissions */
+static inline u32 grcan_txspace(size_t txsize, u32 txwr, u32 eskbp)
+{
+ u32 slots = txsize / GRCAN_MSG_SIZE - 1;
+ u32 used = grcan_ring_sub(txwr, eskbp, txsize) / GRCAN_MSG_SIZE;
+
+ return slots - used;
+}
+
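/* Editor's illustration, not part of the patch: how the three ring helpers
 * above cooperate. With a hypothetical 1024-byte tx buffer there are
 * 1024 / GRCAN_MSG_SIZE = 64 message slots, one of which is always kept
 * unused so that write index == read index unambiguously means "empty".
 * Compiles standalone and uses assert() as a self-test.
 */
#include <assert.h>
#include <stdint.h>

#define MSG_SIZE 16u

static uint32_t ring_add(uint32_t a, uint32_t b, uint32_t size)
{
	uint32_t sum = a + b;

	return sum < size ? sum : sum - size;
}

static uint32_t ring_sub(uint32_t a, uint32_t b, uint32_t size)
{
	return ring_add(a, size - b, size);
}

static uint32_t txspace(uint32_t txsize, uint32_t txwr, uint32_t eskbp)
{
	uint32_t slots = txsize / MSG_SIZE - 1;	/* one slot reserved */
	uint32_t used = ring_sub(txwr, eskbp, txsize) / MSG_SIZE;

	return slots - used;
}

int main(void)
{
	assert(txspace(1024, 0, 0) == 63);	/* empty ring */
	assert(txspace(1024, 16, 1008) == 61);	/* write pointer wrapped, 2 in flight */
	return 0;
}
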
+/* Configuration parameters that can be set via module parameters */
+static struct grcan_device_config grcan_module_config =
+ GRCAN_DEFAULT_DEVICE_CONFIG;
+
+static const struct can_bittiming_const grcan_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = GRCAN_CONF_PS1_MIN + 1,
+ .tseg1_max = GRCAN_CONF_PS1_MAX + 1,
+ .tseg2_min = GRCAN_CONF_PS2_MIN,
+ .tseg2_max = GRCAN_CONF_PS2_MAX,
+ .sjw_max = GRCAN_CONF_RSJ_MAX,
+ .brp_min = GRCAN_CONF_SCALER_MIN + 1,
+ .brp_max = GRCAN_CONF_SCALER_MAX + 1,
+ .brp_inc = GRCAN_CONF_SCALER_INC,
+};
+
+static int grcan_set_bittiming(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 timing = 0;
+ int bpr, rsj, ps1, ps2, scaler;
+
+ /* Should never happen - function will not be called when
+ * device is up
+ */
+ if (grcan_read_bits(&regs->ctrl, GRCAN_CTRL_ENABLE))
+ return -EBUSY;
+
+ bpr = 0; /* Note bpr and brp are different concepts */
+ rsj = bt->sjw;
+ ps1 = (bt->prop_seg + bt->phase_seg1) - 1; /* tseg1 - 1 */
+ ps2 = bt->phase_seg2;
+ scaler = (bt->brp - 1);
+ netdev_dbg(dev, "Request for BPR=%d, RSJ=%d, PS1=%d, PS2=%d, SCALER=%d",
+ bpr, rsj, ps1, ps2, scaler);
+ if (!(ps1 > ps2)) {
+ netdev_err(dev, "PS1 > PS2 must hold: PS1=%d, PS2=%d\n",
+ ps1, ps2);
+ return -EINVAL;
+ }
+ if (!(ps2 >= rsj)) {
+ netdev_err(dev, "PS2 >= RSJ must hold: PS2=%d, RSJ=%d\n",
+ ps2, rsj);
+ return -EINVAL;
+ }
+
+ timing |= (bpr << GRCAN_CONF_BPR_BIT) & GRCAN_CONF_BPR;
+ timing |= (rsj << GRCAN_CONF_RSJ_BIT) & GRCAN_CONF_RSJ;
+ timing |= (ps1 << GRCAN_CONF_PS1_BIT) & GRCAN_CONF_PS1;
+ timing |= (ps2 << GRCAN_CONF_PS2_BIT) & GRCAN_CONF_PS2;
+ timing |= (scaler << GRCAN_CONF_SCALER_BIT) & GRCAN_CONF_SCALER;
+ netdev_info(dev, "setting timing=0x%x\n", timing);
+ grcan_write_bits(&regs->conf, timing, GRCAN_CONF_TIMING);
+
+ return 0;
+}
+
+static int grcan_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ u32 status = grcan_read_reg(&regs->stat);
+
+ bec->txerr = (status & GRCAN_STAT_TXERRCNT) >> GRCAN_STAT_TXERRCNT_BIT;
+ bec->rxerr = (status & GRCAN_STAT_RXERRCNT) >> GRCAN_STAT_RXERRCNT_BIT;
+ return 0;
+}
+
+static int grcan_poll(struct napi_struct *napi, int budget);
+
+/* Reset device, but keep configuration information */
+static void grcan_reset(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ u32 config = grcan_read_reg(&regs->conf);
+
+ grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
+ grcan_write_reg(&regs->conf, config);
+
+ priv->eskbp = grcan_read_reg(&regs->txrd);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Turn off hardware filtering - regs->rxcode set to 0 by reset */
+ grcan_write_reg(&regs->rxmask, 0);
+}
+
+/* stop device without changing any configurations */
+static void grcan_stop_hardware(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+
+ grcan_write_reg(&regs->imr, GRCAN_IRQ_NONE);
+ grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+ grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+ grcan_clear_bits(&regs->ctrl, GRCAN_CTRL_ENABLE);
+}
+
+/* Let priv->eskbp catch up to regs->txrd and echo back the skbs if echo
+ * is true and free them otherwise.
+ *
+ * If budget is >= 0, stop after handling at most budget skbs. Otherwise,
+ * continue until priv->eskbp catches up to regs->txrd.
+ *
+ * priv->lock *must* be held when calling this function
+ */
+static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ struct net_device_stats *stats = &dev->stats;
+ int i, work_done;
+
+ /* Updates to priv->eskbp and wake-ups of the queue need to
+ * be atomic towards the reads of priv->eskbp and shut-downs
+ * of the queue in grcan_start_xmit.
+ */
+ u32 txrd = grcan_read_reg(&regs->txrd);
+
+ for (work_done = 0; work_done < budget || budget < 0; work_done++) {
+ if (priv->eskbp == txrd)
+ break;
+ i = priv->eskbp / GRCAN_MSG_SIZE;
+ if (echo) {
+ /* Normal echo of messages */
+ stats->tx_packets++;
+ stats->tx_bytes += priv->txdlc[i];
+ priv->txdlc[i] = 0;
+ can_get_echo_skb(dev, i);
+ } else {
+ /* For cleanup of untransmitted messages */
+ can_free_echo_skb(dev, i);
+ }
+
+ priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
+ dma->tx.size);
+ txrd = grcan_read_reg(&regs->txrd);
+ }
+ return work_done;
+}
+
+static void grcan_lost_one_shot_frame(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ u32 txrd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ catch_up_echo_skb(dev, -1, true);
+
+ if (unlikely(grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE))) {
+ /* Should never happen */
+ netdev_err(dev, "TXCTRL enabled at TXLOSS in one shot mode\n");
+ } else {
+ /* By the time a GRCAN_IRQ_TXLOSS is generated in
+ * one-shot mode there is no problem in writing
+ * to TXRD even in versions of the hardware in
+ * which GRCAN_TXCTRL_ONGOING is not cleared properly
+ * in one-shot mode.
+ */
+
+ /* Skip message and discard echo-skb */
+ txrd = grcan_read_reg(&regs->txrd);
+ txrd = grcan_ring_add(txrd, GRCAN_MSG_SIZE, dma->tx.size);
+ grcan_write_reg(&regs->txrd, txrd);
+ catch_up_echo_skb(dev, -1, false);
+
+ if (!priv->resetting && !priv->closing &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) {
+ netif_wake_queue(dev);
+ grcan_set_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void grcan_err(struct net_device *dev, u32 sources, u32 status)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame cf;
+
+ /* Zero potential error_frame */
+ memset(&cf, 0, sizeof(cf));
+
+ /* Message lost interrupt. This might be due to arbitration error, but
+ * is also triggered when there is no one else on the can bus or when
+ * there is a problem with the hardware interface or the bus itself. As
+ * arbitration errors can not be singled out, no error frames are
+ * generated reporting this event as an arbitration error.
+ */
+ if (sources & GRCAN_IRQ_TXLOSS) {
+ /* Take care of failed one-shot transmit */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ grcan_lost_one_shot_frame(dev);
+
+ /* Stop printing as soon as error passive or bus off is in
+ * effect to limit the amount of txloss debug printouts.
+ */
+ if (!(status & GRCAN_STAT_ERRCTR_RELATED)) {
+ netdev_dbg(dev, "tx message lost\n");
+ stats->tx_errors++;
+ }
+ }
+
+ /* Conditions dealing with the error counters. There is no interrupt for
+ * error warning, but there are interrupts for increases of the error
+ * counters.
+ */
+ if ((sources & GRCAN_IRQ_ERRCTR_RELATED) ||
+ (status & GRCAN_STAT_ERRCTR_RELATED)) {
+ enum can_state state = priv->can.state;
+ enum can_state oldstate = state;
+ u32 txerr = (status & GRCAN_STAT_TXERRCNT)
+ >> GRCAN_STAT_TXERRCNT_BIT;
+ u32 rxerr = (status & GRCAN_STAT_RXERRCNT)
+ >> GRCAN_STAT_RXERRCNT_BIT;
+
+ /* Figure out current state */
+ if (status & GRCAN_STAT_OFF) {
+ state = CAN_STATE_BUS_OFF;
+ } else if (status & GRCAN_STAT_PASS) {
+ state = CAN_STATE_ERROR_PASSIVE;
+ } else if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT ||
+ rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT) {
+ state = CAN_STATE_ERROR_WARNING;
+ } else {
+ state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ /* Handle and report state changes */
+ if (state != oldstate) {
+ switch (state) {
+ case CAN_STATE_BUS_OFF:
+ netdev_dbg(dev, "bus-off\n");
+ netif_carrier_off(dev);
+ priv->can.can_stats.bus_off++;
+
+ /* Prevent the hardware from recovering from bus
+ * off on its own if restart is disabled.
+ */
+ if (!priv->can.restart_ms)
+ grcan_stop_hardware(dev);
+
+ cf.can_id |= CAN_ERR_BUSOFF;
+ break;
+
+ case CAN_STATE_ERROR_PASSIVE:
+ netdev_dbg(dev, "Error passive condition\n");
+ priv->can.can_stats.error_passive++;
+
+ cf.can_id |= CAN_ERR_CRTL;
+ if (txerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
+ cf.data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+ if (rxerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
+ cf.data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ break;
+
+ case CAN_STATE_ERROR_WARNING:
+ netdev_dbg(dev, "Error warning condition\n");
+ priv->can.can_stats.error_warning++;
+
+ cf.can_id |= CAN_ERR_CRTL;
+ if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
+ cf.data[1] |= CAN_ERR_CRTL_TX_WARNING;
+ if (rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
+ cf.data[1] |= CAN_ERR_CRTL_RX_WARNING;
+ break;
+
+ case CAN_STATE_ERROR_ACTIVE:
+ netdev_dbg(dev, "Error active condition\n");
+ cf.can_id |= CAN_ERR_CRTL;
+ break;
+
+ default:
+ /* There are no others at this point */
+ break;
+ }
+ cf.data[6] = txerr;
+ cf.data[7] = rxerr;
+ priv->can.state = state;
+ }
+
+ /* Report automatic restarts */
+ if (priv->can.restart_ms && oldstate == CAN_STATE_BUS_OFF) {
+ unsigned long flags;
+
+ cf.can_id |= CAN_ERR_RESTARTED;
+ netdev_dbg(dev, "restarted\n");
+ priv->can.can_stats.restarts++;
+ netif_carrier_on(dev);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!priv->resetting && !priv->closing) {
+ u32 txwr = grcan_read_reg(&regs->txwr);
+
+ if (grcan_txspace(dma->tx.size, txwr,
+ priv->eskbp))
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+
+ /* Data overrun interrupt */
+ if ((sources & GRCAN_IRQ_OR) || (status & GRCAN_STAT_OR)) {
+ netdev_dbg(dev, "got data overrun interrupt\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ cf.can_id |= CAN_ERR_CRTL;
+ cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
+ /* AHB bus error interrupts (not CAN bus errors) - shut down the
+ * device.
+ */
+ if (sources & (GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR) ||
+ (status & GRCAN_STAT_AHBERR)) {
+ char *txrx = "";
+ unsigned long flags;
+
+ if (sources & GRCAN_IRQ_TXAHBERR) {
+ txrx = "on tx ";
+ stats->tx_errors++;
+ } else if (sources & GRCAN_IRQ_RXAHBERR) {
+ txrx = "on rx ";
+ stats->rx_errors++;
+ }
+ netdev_err(dev, "Fatal AHB buss error %s- halting device\n",
+ txrx);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Prevent anything from being enabled again and halt the device */
+ priv->closing = true;
+ netif_stop_queue(dev);
+ grcan_stop_hardware(dev);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ /* Pass on an error frame if there is something to report,
+ * i.e. the id contains some information
+ */
+ if (cf.can_id) {
+ struct can_frame *skb_cf;
+ struct sk_buff *skb = alloc_can_err_skb(dev, &skb_cf);
+
+ if (skb == NULL) {
+ netdev_dbg(dev, "could not allocate error frame\n");
+ return;
+ }
+ skb_cf->can_id |= cf.can_id;
+ memcpy(skb_cf->data, cf.data, sizeof(cf.data));
+
+ netif_rx(skb);
+ }
+}
+
+static irqreturn_t grcan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ u32 sources, status;
+
+ /* Find out the source */
+ sources = grcan_read_reg(&regs->pimsr);
+ if (!sources)
+ return IRQ_NONE;
+ grcan_write_reg(&regs->picr, sources);
+ status = grcan_read_reg(&regs->stat);
+
+ /* If we got TX progress, the device has not hung,
+ * so disable the hang timer
+ */
+ if (priv->need_txbug_workaround &&
+ (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
+ del_timer(&priv->hang_timer);
+ }
+
+ /* Frame(s) received or transmitted */
+ if (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_RX)) {
+ /* Disable tx/rx interrupts and schedule poll(). No need for
+ * locking as interference from a running reset at worst leads
+ * to an extra interrupt.
+ */
+ grcan_clear_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
+ napi_schedule(&priv->napi);
+ }
+
+ /* (Potential) error conditions to take care of */
+ if (sources & GRCAN_IRQ_ERRORS)
+ grcan_err(dev, sources, status);
+
+ return IRQ_HANDLED;
+}
+
+/* Reset device and restart operations from where they were.
+ *
+ * This assumes that RXCTRL & RXCTRL is properly disabled and that RX
+ * is not ONGOING (TX might be stuck in ONGOING due to a harwrware bug
+ * for single shot)
+ */
+static void grcan_running_reset(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ unsigned long flags;
+
+ /* This temporarily messes with eskbp, so we need to lock
+ * priv->lock
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->resetting = false;
+ del_timer(&priv->hang_timer);
+ del_timer(&priv->rr_timer);
+
+ if (!priv->closing) {
+ /* Save and reset - config register preserved by grcan_reset */
+ u32 imr = grcan_read_reg(&regs->imr);
+
+ u32 txaddr = grcan_read_reg(&regs->txaddr);
+ u32 txsize = grcan_read_reg(&regs->txsize);
+ u32 txwr = grcan_read_reg(&regs->txwr);
+ u32 txrd = grcan_read_reg(&regs->txrd);
+ u32 eskbp = priv->eskbp;
+
+ u32 rxaddr = grcan_read_reg(&regs->rxaddr);
+ u32 rxsize = grcan_read_reg(&regs->rxsize);
+ u32 rxwr = grcan_read_reg(&regs->rxwr);
+ u32 rxrd = grcan_read_reg(&regs->rxrd);
+
+ grcan_reset(dev);
+
+ /* Restore */
+ grcan_write_reg(&regs->txaddr, txaddr);
+ grcan_write_reg(&regs->txsize, txsize);
+ grcan_write_reg(&regs->txwr, txwr);
+ grcan_write_reg(&regs->txrd, txrd);
+ priv->eskbp = eskbp;
+
+ grcan_write_reg(&regs->rxaddr, rxaddr);
+ grcan_write_reg(&regs->rxsize, rxsize);
+ grcan_write_reg(&regs->rxwr, rxwr);
+ grcan_write_reg(&regs->rxrd, rxrd);
+
+ /* Turn on device again */
+ grcan_write_reg(&regs->imr, imr);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ grcan_write_reg(&regs->txctrl, GRCAN_TXCTRL_ENABLE
+ | (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
+ ? GRCAN_TXCTRL_SINGLE : 0));
+ grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+ grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);
+
+ /* Start the queue if there is space and listen-only mode is not
+ * enabled
+ */
+ if (grcan_txspace(priv->dma.tx.size, txwr, priv->eskbp) &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ netdev_err(dev, "Device reset and restored\n");
+}
+
+/* Waiting time in usecs corresponding to the transmission of three maximum
+ * sized can frames at the given bitrate (in bits/sec). Waiting for this amount
+ * of time makes sure that the can controller has time to finish sending or
+ * receiving a frame with a good margin.
+ *
+ * usecs/sec * number of frames * bits/frame / bits/sec
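+ *
+ * At e.g. 500 kbit/s this evaluates to 6 * GRCAN_EFF_FRAME_MAX_BITS usecs.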
+ */
+static inline u32 grcan_ongoing_wait_usecs(__u32 bitrate)
+{
+ return 1000000 * 3 * GRCAN_EFF_FRAME_MAX_BITS / bitrate;
+}
+
+/* Set the timer so that it will not fire until after a period in which the can
+ * controller has a good margin to finish transmitting a frame unless it has
+ * hung
+ */
+static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
+{
+ u32 wait_jiffies = usecs_to_jiffies(grcan_ongoing_wait_usecs(bitrate));
+
+ mod_timer(timer, jiffies + wait_jiffies);
+}
+
+/* Disable channels and schedule a running reset */
+static void grcan_initiate_running_reset(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ unsigned long flags;
+
+ netdev_err(dev, "Device seems hanged - reset scheduled\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* The main body of this function must never be executed again
+ * until after an execution of grcan_running_reset
+ */
+ if (!priv->resetting && !priv->closing) {
+ priv->resetting = true;
+ netif_stop_queue(dev);
+ grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+ grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+ grcan_reset_timer(&priv->rr_timer, priv->can.bittiming.bitrate);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void grcan_free_dma_buffers(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_dma *dma = &priv->dma;
+
+ dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
+ dma->base_handle);
+ memset(dma, 0, sizeof(*dma));
+}
+
+static int grcan_allocate_dma_buffers(struct net_device *dev,
+ size_t tsize, size_t rsize)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_dma *dma = &priv->dma;
+ struct grcan_dma_buffer *large = rsize > tsize ? &dma->rx : &dma->tx;
+ struct grcan_dma_buffer *small = rsize > tsize ? &dma->tx : &dma->rx;
+ size_t shift;
+
+ /* The large, i.e. first, buffer needs a size that is a whole number
+ * of GRCAN_BUFFER_ALIGNMENT units
+ */
+ size_t maxs = max(tsize, rsize);
+ size_t lsize = ALIGN(maxs, GRCAN_BUFFER_ALIGNMENT);
+
+ /* Put the small buffer after that */
+ size_t ssize = min(tsize, rsize);
+
+ /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
+ dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
+ dma->base_buf = dma_alloc_coherent(&dev->dev,
+ dma->base_size,
+ &dma->base_handle,
+ GFP_KERNEL);
+
+ if (!dma->base_buf)
+ return -ENOMEM;
+
+ dma->tx.size = tsize;
+ dma->rx.size = rsize;
+
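+ /* Place the larger buffer first at an aligned bus address and put the
+ * smaller buffer directly after it; shift is the alignment padding at
+ * the start of the allocation.
+ */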
+ large->handle = ALIGN(dma->base_handle, GRCAN_BUFFER_ALIGNMENT);
+ small->handle = large->handle + lsize;
+ shift = large->handle - dma->base_handle;
+
+ large->buf = dma->base_buf + shift;
+ small->buf = large->buf + lsize;
+
+ return 0;
+}
+
+/* priv->lock *must* be held when calling this function */
+static int grcan_start(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ u32 confop, txctrl;
+
+ grcan_reset(dev);
+
+ grcan_write_reg(&regs->txaddr, priv->dma.tx.handle);
+ grcan_write_reg(&regs->txsize, priv->dma.tx.size);
+ /* regs->txwr, regs->txrd and priv->eskbp already set to 0 by reset */
+
+ grcan_write_reg(&regs->rxaddr, priv->dma.rx.handle);
+ grcan_write_reg(&regs->rxsize, priv->dma.rx.size);
+ /* regs->rxwr and regs->rxrd already set to 0 by reset */
+
+ /* Enable interrupts */
+ grcan_read_reg(&regs->pir);
+ grcan_write_reg(&regs->imr, GRCAN_IRQ_DEFAULT);
+
+ /* Enable interfaces, channels and device */
+ confop = GRCAN_CONF_ABORT
+ | (priv->config.enable0 ? GRCAN_CONF_ENABLE0 : 0)
+ | (priv->config.enable1 ? GRCAN_CONF_ENABLE1 : 0)
+ | (priv->config.select ? GRCAN_CONF_SELECT : 0)
+ | (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY ?
+ GRCAN_CONF_SILENT : 0)
+ | (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
+ GRCAN_CONF_SAM : 0);
+ grcan_write_bits(&regs->conf, confop, GRCAN_CONF_OPERATION);
+ txctrl = GRCAN_TXCTRL_ENABLE
+ | (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
+ ? GRCAN_TXCTRL_SINGLE : 0);
+ grcan_write_reg(&regs->txctrl, txctrl);
+ grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+ grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+}
+
+static int grcan_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err = 0;
+
+ if (mode == CAN_MODE_START) {
+ /* This might be called to restart the device to recover from
+ * bus off errors
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->closing || priv->resetting) {
+ err = -EBUSY;
+ } else {
+ netdev_info(dev, "Restarting device\n");
+ grcan_start(dev);
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return err;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int grcan_open(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_dma *dma = &priv->dma;
+ unsigned long flags;
+ int err;
+
+ /* Allocate memory */
+ err = grcan_allocate_dma_buffers(dev, priv->config.txsize,
+ priv->config.rxsize);
+ if (err) {
+ netdev_err(dev, "could not allocate DMA buffers\n");
+ return err;
+ }
+
+ priv->echo_skb = kzalloc(dma->tx.size * sizeof(*priv->echo_skb),
+ GFP_KERNEL);
+ if (!priv->echo_skb) {
+ err = -ENOMEM;
+ goto exit_free_dma_buffers;
+ }
+ priv->can.echo_skb_max = dma->tx.size;
+ priv->can.echo_skb = priv->echo_skb;
+
+ priv->txdlc = kzalloc(dma->tx.size * sizeof(*priv->txdlc), GFP_KERNEL);
+ if (!priv->txdlc) {
+ err = -ENOMEM;
+ goto exit_free_echo_skb;
+ }
+
+ /* Get can device up */
+ err = open_candev(dev);
+ if (err)
+ goto exit_free_txdlc;
+
+ err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (err)
+ goto exit_close_candev;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ napi_enable(&priv->napi);
+ grcan_start(dev);
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_start_queue(dev);
+ priv->resetting = false;
+ priv->closing = false;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+
+exit_close_candev:
+ close_candev(dev);
+exit_free_txdlc:
+ kfree(priv->txdlc);
+exit_free_echo_skb:
+ kfree(priv->echo_skb);
+exit_free_dma_buffers:
+ grcan_free_dma_buffers(dev);
+ return err;
+}
+
+static int grcan_close(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ napi_disable(&priv->napi);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->closing = true;
+ if (priv->need_txbug_workaround) {
+ del_timer_sync(&priv->hang_timer);
+ del_timer_sync(&priv->rr_timer);
+ }
+ netif_stop_queue(dev);
+ grcan_stop_hardware(dev);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ grcan_free_dma_buffers(dev);
+ priv->can.echo_skb_max = 0;
+ priv->can.echo_skb = NULL;
+ kfree(priv->echo_skb);
+ kfree(priv->txdlc);
+
+ return 0;
+}
+
+static int grcan_transmit_catch_up(struct net_device *dev, int budget)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ int work_done;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ work_done = catch_up_echo_skb(dev, budget, true);
+ if (work_done) {
+ if (!priv->resetting && !priv->closing &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_wake_queue(dev);
+
+ /* With napi we don't get TX interrupts for a while,
+ * so prevent a running reset while catching up
+ */
+ if (priv->need_txbug_workaround)
+ del_timer(&priv->hang_timer);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return work_done;
+}
+
+static int grcan_receive(struct net_device *dev, int budget)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 wr, rd, startrd;
+ u32 *slot;
+ u32 i, rtr, eff, j, shift;
+ int work_done = 0;
+
+ rd = grcan_read_reg(&regs->rxrd);
+ startrd = rd;
+ for (work_done = 0; work_done < budget; work_done++) {
+ /* Check for packet to receive */
+ wr = grcan_read_reg(&regs->rxwr);
+ if (rd == wr)
+ break;
+
+ /* Take care of packet */
+ skb = alloc_can_skb(dev, &cf);
+ if (skb == NULL) {
+ netdev_err(dev,
+ "dropping frame: skb allocation failed\n");
+ stats->rx_dropped++;
+ continue;
+ }
+
+ slot = dma->rx.buf + rd;
+ eff = slot[0] & GRCAN_MSG_IDE;
+ rtr = slot[0] & GRCAN_MSG_RTR;
+ if (eff) {
+ cf->can_id = ((slot[0] & GRCAN_MSG_EID)
+ >> GRCAN_MSG_EID_BIT);
+ cf->can_id |= CAN_EFF_FLAG;
+ } else {
+ cf->can_id = ((slot[0] & GRCAN_MSG_BID)
+ >> GRCAN_MSG_BID_BIT);
+ }
+ cf->can_dlc = get_can_dlc((slot[1] & GRCAN_MSG_DLC)
+ >> GRCAN_MSG_DLC_BIT);
+ if (rtr) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (i = 0; i < cf->can_dlc; i++) {
+ j = GRCAN_MSG_DATA_SLOT_INDEX(i);
+ shift = GRCAN_MSG_DATA_SHIFT(i);
+ cf->data[i] = (u8)(slot[j] >> shift);
+ }
+ }
+ /* Update statistics before passing the skb on, as it may be freed
+ * by netif_receive_skb
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+
+ /* Update read pointer */
+ rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
+ }
+
+ /* Make sure everything is read before allowing hardware to
+ * use the memory
+ */
+ mb();
+
+ /* Update read pointer - no need to check for ongoing */
+ if (likely(rd != startrd))
+ grcan_write_reg(&regs->rxrd, rd);
+
+ return work_done;
+}
+
+static int grcan_poll(struct napi_struct *napi, int budget)
+{
+ struct grcan_priv *priv = container_of(napi, struct grcan_priv, napi);
+ struct net_device *dev = priv->dev;
+ struct grcan_registers __iomem *regs = priv->regs;
+ unsigned long flags;
+ int tx_work_done, rx_work_done;
+ int rx_budget = budget / 2;
+ int tx_budget = budget - rx_budget;
+
+ /* Half of the budget for receiving messages */
+ rx_work_done = grcan_receive(dev, rx_budget);
+
+ /* Half of the budget for transmitting messages as that can trigger echo
+ * frames being received
+ */
+ tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
+
+ if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
+ napi_complete(napi);
+
+ /* Guarantee no interference with a running reset that otherwise
+ * could turn off interrupts.
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Enable tx and rx interrupts again. No need to check
+ * priv->closing as napi_disable in grcan_close is waiting for
+ * scheduled napi calls to finish.
+ */
+ grcan_set_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return rx_work_done + tx_work_done;
+}
+
+/* Work around the tx bug by waiting a while for the risky situation to clear.
+ * If that fails, drop a frame in one-shot mode or indicate a busy device
+ * otherwise.
+ *
+ * Returns 0 on successful wait. Otherwise it sets *netdev_tx_status to the
+ * value that should be returned by grcan_start_xmit when aborting the xmit.
+ */
+static int grcan_txbug_workaround(struct net_device *dev, struct sk_buff *skb,
+ u32 txwr, u32 oneshotmode,
+ netdev_tx_t *netdev_tx_status)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ int i;
+ unsigned long flags;
+
+ /* Wait a while for ongoing to be cleared or for the read pointer to catch up
+ * to the write pointer. The latter is needed due to a bug in older versions of
+ * GRCAN in which ONGOING is not cleared properly in one-shot mode when a
+ * transmission fails.
+ */
+ for (i = 0; i < GRCAN_SHORTWAIT_USECS; i++) {
+ udelay(1);
+ if (!grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ONGOING) ||
+ grcan_read_reg(&regs->txrd) == txwr) {
+ return 0;
+ }
+ }
+
+ /* Clean up, in case the situation was not resolved */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!priv->resetting && !priv->closing) {
+ /* Queue might have been stopped earlier in grcan_start_xmit */
+ if (grcan_txspace(dma->tx.size, txwr, priv->eskbp))
+ netif_wake_queue(dev);
+ /* Set a timer to resolve a hung tx controller */
+ if (!timer_pending(&priv->hang_timer))
+ grcan_reset_timer(&priv->hang_timer,
+ priv->can.bittiming.bitrate);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (oneshotmode) {
+ /* In one-shot mode we should never end up here because
+ * then the interrupt handler increases txrd on TXLOSS,
+ * but it is consistent with one-shot mode to drop the
+ * frame in this case.
+ */
+ kfree_skb(skb);
+ *netdev_tx_status = NETDEV_TX_OK;
+ } else {
+ /* In normal mode the socket-can transmission queue gets
+ * to keep the frame so that it can be retransmitted
+ * later
+ */
+ *netdev_tx_status = NETDEV_TX_BUSY;
+ }
+ return -EBUSY;
+}
+
+/* Notes on the tx cyclic buffer handling:
+ *
+ * regs->txwr - the next slot for the driver to put data to be sent
+ * regs->txrd - the next slot for the device to read data
+ * priv->eskbp - the next slot for the driver to call can_put_echo_skb for
+ *
+ * grcan_start_xmit can enqueue more messages as long as regs->txwr does
+ * not reach priv->eskbp (within 1 message gap)
+ *
+ * The device sends messages until regs->txrd reaches regs->txwr
+ *
+ * The interrupt handler calls can_put_echo_skb until
+ * priv->eskbp reaches regs->txrd
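+ *
+ * Example (offsets in units of one message slot): with eskbp = 0, txrd = 2
+ * and txwr = 5, slots 0-1 have been sent but not yet echoed, slots 2-4 are
+ * queued for transmission, and new frames can be entered from slot 5 as
+ * long as txwr does not catch up to within one slot of eskbp.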
+ */
+static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 id, txwr, txrd, space, txctrl;
+ int slotindex;
+ u32 *slot;
+ u32 i, rtr, eff, dlc, tmp, err;
+ int j, shift;
+ unsigned long flags;
+ u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ /* Trying to transmit in silent mode will generate error interrupts, but
+ * this should never happen - the queue should not have been started.
+ */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ return NETDEV_TX_BUSY;
+
+ /* Reads of priv->eskbp and shut-downs of the queue need to
+ * be atomic with respect to the updates to priv->eskbp and wake-ups
+ * of the queue in the interrupt handler.
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ txwr = grcan_read_reg(&regs->txwr);
+ space = grcan_txspace(dma->tx.size, txwr, priv->eskbp);
+
+ slotindex = txwr / GRCAN_MSG_SIZE;
+ slot = dma->tx.buf + txwr;
+
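+ /* Stop the queue when this frame will use the last free slot; it is
+ * woken again once echoing of completed frames has freed up space
+ */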
+ if (unlikely(space == 1))
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ /* End of critical section */
+
+ /* This should never happen. If the circular buffer is full, the
+ * queue should have been stopped already.
+ */
+ if (unlikely(!space)) {
+ netdev_err(dev, "No buffer space, but queue is non-stopped.\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Convert and write CAN message to DMA buffer */
+ eff = cf->can_id & CAN_EFF_FLAG;
+ rtr = cf->can_id & CAN_RTR_FLAG;
+ id = cf->can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK);
+ dlc = cf->can_dlc;
+ if (eff)
+ tmp = (id << GRCAN_MSG_EID_BIT) & GRCAN_MSG_EID;
+ else
+ tmp = (id << GRCAN_MSG_BID_BIT) & GRCAN_MSG_BID;
+ slot[0] = (eff ? GRCAN_MSG_IDE : 0) | (rtr ? GRCAN_MSG_RTR : 0) | tmp;
+
+ slot[1] = ((dlc << GRCAN_MSG_DLC_BIT) & GRCAN_MSG_DLC);
+ slot[2] = 0;
+ slot[3] = 0;
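+ /* GRCAN_MSG_DATA_SLOT_INDEX/SHIFT map data byte i to its 32-bit word
+ * and bit position within the message slot
+ */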
+ for (i = 0; i < dlc; i++) {
+ j = GRCAN_MSG_DATA_SLOT_INDEX(i);
+ shift = GRCAN_MSG_DATA_SHIFT(i);
+ slot[j] |= cf->data[i] << shift;
+ }
+
+ /* Check that the channel has not been disabled. These cases
+ * should never happen
+ */
+ txctrl = grcan_read_reg(&regs->txctrl);
+ if (!(txctrl & GRCAN_TXCTRL_ENABLE))
+ netdev_err(dev, "tx channel spuriously disabled\n");
+
+ if (oneshotmode && !(txctrl & GRCAN_TXCTRL_SINGLE))
+ netdev_err(dev, "one-shot mode spuriously disabled\n");
+
+ /* Bug workaround for old versions of grcan where updating txwr
+ * in the same clock cycle as the controller updates txrd to
+ * the current txwr could hang the can controller
+ */
+ if (priv->need_txbug_workaround) {
+ txrd = grcan_read_reg(&regs->txrd);
+ if (unlikely(grcan_ring_sub(txwr, txrd, dma->tx.size) == 1)) {
+ netdev_tx_t txstatus;
+
+ err = grcan_txbug_workaround(dev, skb, txwr,
+ oneshotmode, &txstatus);
+ if (err)
+ return txstatus;
+ }
+ }
+
+ /* Prepare skb for echoing. This must be after the bug workaround above
+ * as ownership of the skb is passed on by calling can_put_echo_skb.
+ * Returning NETDEV_TX_BUSY or accessing skb or cf after a call to
+ * can_put_echo_skb would be an error unless other measures are
+ * taken.
+ */
+ priv->txdlc[slotindex] = cf->can_dlc; /* Store dlc for statistics */
+ can_put_echo_skb(skb, dev, slotindex);
+
+ /* Make sure everything is written before allowing hardware to
+ * read from the memory
+ */
+ wmb();
+
+ /* Update write pointer to start transmission */
+ grcan_write_reg(&regs->txwr,
+ grcan_ring_add(txwr, GRCAN_MSG_SIZE, dma->tx.size));
+
+ return NETDEV_TX_OK;
+}
+
+/* ========== Setting up sysfs interface and module parameters ========== */
+
+#define GRCAN_NOT_BOOL(unsigned_val) ((unsigned_val) > 1)
+
+#define GRCAN_MODULE_PARAM(name, mtype, valcheckf, desc) \
+ static void grcan_sanitize_##name(struct platform_device *pd) \
+ { \
+ struct grcan_device_config grcan_default_config \
+ = GRCAN_DEFAULT_DEVICE_CONFIG; \
+ if (valcheckf(grcan_module_config.name)) { \
+ dev_err(&pd->dev, \
+ "Invalid module parameter value for " \
+ #name " - setting default\n"); \
+ grcan_module_config.name = \
+ grcan_default_config.name; \
+ } \
+ } \
+ module_param_named(name, grcan_module_config.name, \
+ mtype, S_IRUGO); \
+ MODULE_PARM_DESC(name, desc)
+
+#define GRCAN_CONFIG_ATTR(name, desc) \
+ static ssize_t grcan_store_##name(struct device *sdev, \
+ struct device_attribute *att, \
+ const char *buf, \
+ size_t count) \
+ { \
+ struct net_device *dev = to_net_dev(sdev); \
+ struct grcan_priv *priv = netdev_priv(dev); \
+ u8 val; \
+ int ret; \
+ if (dev->flags & IFF_UP) \
+ return -EBUSY; \
+ ret = kstrtou8(buf, 0, &val); \
+ if (ret < 0 || val > 1) \
+ return -EINVAL; \
+ priv->config.name = val; \
+ return count; \
+ } \
+ static ssize_t grcan_show_##name(struct device *sdev, \
+ struct device_attribute *att, \
+ char *buf) \
+ { \
+ struct net_device *dev = to_net_dev(sdev); \
+ struct grcan_priv *priv = netdev_priv(dev); \
+ return sprintf(buf, "%d\n", priv->config.name); \
+ } \
+ static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
+ grcan_show_##name, \
+ grcan_store_##name); \
+ GRCAN_MODULE_PARAM(name, ushort, GRCAN_NOT_BOOL, desc)
+
+/* The following configuration options are made available both via module
+ * parameters and writable sysfs files. See the chapter about GRCAN in the
+ * documentation for the GRLIB VHDL library for further details.
+ */
+GRCAN_CONFIG_ATTR(enable0,
+ "Configuration of physical interface 0. Determines\n" \
+ "the \"Enable 0\" bit of the configuration register.\n" \
+ "Format: 0 | 1\nDefault: 0\n");
+
+GRCAN_CONFIG_ATTR(enable1,
+ "Configuration of physical interface 1. Determines\n" \
+ "the \"Enable 1\" bit of the configuration register.\n" \
+ "Format: 0 | 1\nDefault: 0\n");
+
+GRCAN_CONFIG_ATTR(select,
+ "Select which physical interface to use.\n" \
+ "Format: 0 | 1\nDefault: 0\n");
+
+/* The tx and rx buffer size configuration options are only available via module
+ * parameters.
+ */
+GRCAN_MODULE_PARAM(txsize, uint, GRCAN_INVALID_BUFFER_SIZE,
+ "Sets the size of the tx buffer.\n" \
+ "Format: <unsigned int> where (txsize & ~0x1fffc0) == 0\n" \
+ "Default: 1024\n");
+GRCAN_MODULE_PARAM(rxsize, uint, GRCAN_INVALID_BUFFER_SIZE,
+ "Sets the size of the rx buffer.\n" \
+ "Format: <unsigned int> where (size & ~0x1fffc0) == 0\n" \
+ "Default: 1024\n");
+
+/* Function that makes sure that configuration options set via
+ * module parameters have valid values
+ */
+static void grcan_sanitize_module_config(struct platform_device *ofdev)
+{
+ grcan_sanitize_enable0(ofdev);
+ grcan_sanitize_enable1(ofdev);
+ grcan_sanitize_select(ofdev);
+ grcan_sanitize_txsize(ofdev);
+ grcan_sanitize_rxsize(ofdev);
+}
+
+static const struct attribute *const sysfs_grcan_attrs[] = {
+ /* Config attrs */
+ &dev_attr_enable0.attr,
+ &dev_attr_enable1.attr,
+ &dev_attr_select.attr,
+ NULL,
+};
+
+static const struct attribute_group sysfs_grcan_group = {
+ .name = "grcan",
+ .attrs = (struct attribute **)sysfs_grcan_attrs,
+};
+
+/* ========== Setting up the driver ========== */
+
+static const struct net_device_ops grcan_netdev_ops = {
+ .ndo_open = grcan_open,
+ .ndo_stop = grcan_close,
+ .ndo_start_xmit = grcan_start_xmit,
+};
+
+static int grcan_setup_netdev(struct platform_device *ofdev,
+ void __iomem *base,
+ int irq, u32 ambafreq, bool txbug)
+{
+ struct net_device *dev;
+ struct grcan_priv *priv;
+ struct grcan_registers __iomem *regs;
+ int err;
+
+ dev = alloc_candev(sizeof(struct grcan_priv), 0);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq;
+ dev->flags |= IFF_ECHO;
+ dev->netdev_ops = &grcan_netdev_ops;
+ dev->sysfs_groups[0] = &sysfs_grcan_group;
+
+ priv = netdev_priv(dev);
+ memcpy(&priv->config, &grcan_module_config,
+ sizeof(struct grcan_device_config));
+ priv->dev = dev;
+ priv->regs = base;
+ priv->can.bittiming_const = &grcan_bittiming_const;
+ priv->can.do_set_bittiming = grcan_set_bittiming;
+ priv->can.do_set_mode = grcan_set_mode;
+ priv->can.do_get_berr_counter = grcan_get_berr_counter;
+ priv->can.clock.freq = ambafreq;
+ priv->can.ctrlmode_supported =
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT;
+ priv->need_txbug_workaround = txbug;
+
+ /* Discover if triple sampling is supported by hardware */
+ regs = priv->regs;
+ grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
+ grcan_set_bits(&regs->conf, GRCAN_CONF_SAM);
+ if (grcan_read_bits(&regs->conf, GRCAN_CONF_SAM)) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ dev_dbg(&ofdev->dev, "Hardware supports triple-sampling\n");
+ }
+
+ spin_lock_init(&priv->lock);
+
+ if (priv->need_txbug_workaround) {
+ init_timer(&priv->rr_timer);
+ priv->rr_timer.function = grcan_running_reset;
+ priv->rr_timer.data = (unsigned long)dev;
+
+ init_timer(&priv->hang_timer);
+ priv->hang_timer.function = grcan_initiate_running_reset;
+ priv->hang_timer.data = (unsigned long)dev;
+ }
+
+ netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
+
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+ dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
+ priv->regs, dev->irq, priv->can.clock.freq);
+
+ err = register_candev(dev);
+ if (err)
+ goto exit_free_candev;
+
+ dev_set_drvdata(&ofdev->dev, dev);
+
+ /* Reset device to allow bit-timing to be set. No need to call
+ * grcan_reset at this stage. That is done in grcan_open.
+ */
+ grcan_write_reg(&regs->ctrl, GRCAN_CTRL_RESET);
+
+ return 0;
+exit_free_candev:
+ free_candev(dev);
+ return err;
+}
+
+static int __devinit grcan_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct resource *res;
+ u32 sysid, ambafreq;
+ int irq, err;
+ void __iomem *base;
+ bool txbug = true;
+
+ /* Compare GRLIB version number with the first that does not
+ * have the tx bug (see start_xmit)
+ */
+ err = of_property_read_u32(np, "systemid", &sysid);
+ if (!err && ((sysid & GRLIB_VERSION_MASK)
+ >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
+ txbug = false;
+
+ err = of_property_read_u32(np, "freq", &ambafreq);
+ if (err) {
+ dev_err(&ofdev->dev, "unable to fetch \"freq\" property\n");
+ goto exit_error;
+ }
+
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ base = devm_request_and_ioremap(&ofdev->dev, res);
+ if (!base) {
+ dev_err(&ofdev->dev, "couldn't map IO resource\n");
+ err = -EADDRNOTAVAIL;
+ goto exit_error;
+ }
+
+ irq = irq_of_parse_and_map(np, GRCAN_IRQIX_IRQ);
+ if (!irq) {
+ dev_err(&ofdev->dev, "no irq found\n");
+ err = -ENODEV;
+ goto exit_error;
+ }
+
+ grcan_sanitize_module_config(ofdev);
+
+ err = grcan_setup_netdev(ofdev, base, irq, ambafreq, txbug);
+ if (err)
+ goto exit_dispose_irq;
+
+ return 0;
+
+exit_dispose_irq:
+ irq_dispose_mapping(irq);
+exit_error:
+ dev_err(&ofdev->dev,
+ "%s socket CAN driver initialization failed with error %d\n",
+ DRV_NAME, err);
+ return err;
+}
+
+static int __devexit grcan_remove(struct platform_device *ofdev)
+{
+ struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+ struct grcan_priv *priv = netdev_priv(dev);
+
+ unregister_candev(dev); /* Will in turn call grcan_close */
+
+ irq_dispose_mapping(dev->irq);
+ dev_set_drvdata(&ofdev->dev, NULL);
+ netif_napi_del(&priv->napi);
+ free_candev(dev);
+
+ return 0;
+}
+
+static struct of_device_id grcan_match[] __devinitconst = {
+ {.name = "GAISLER_GRCAN"},
+ {.name = "01_03d"},
+ {.name = "GAISLER_GRHCAN"},
+ {.name = "01_034"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, grcan_match);
+
+static struct platform_driver grcan_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = grcan_match,
+ },
+ .probe = grcan_probe,
+ .remove = __devexit_p(grcan_remove),
+};
+
+module_platform_driver(grcan_driver);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION("Socket CAN driver for Aeroflex Gaisler GRCAN");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 7edadee..c0bfb0a 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1692,7 +1692,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
return ret;
ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
- if (ret <= 0) {
+ if (ret == 0) {
dev_info(mod->dev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -1718,7 +1718,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
return ret;
ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
- if (ret <= 0) {
+ if (ret == 0) {
dev_info(mod->dev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 03df9a8..559be87 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -93,6 +93,7 @@ config CAN_PLX_PCI
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
- IXXAT Automation PC-I 04/PCI card (http://www.ixxat.com/)
+ - Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com)
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 8bc9598..dc04407 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -44,6 +44,7 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"esd CAN-PCI/CPCI/PCI104/200, "
"esd CAN-PCI/PMC/266, "
"esd CAN-PCIe/2000, "
+ "Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
"IXXAT PC-I 04/PCI")
MODULE_LICENSE("GPL v2");
@@ -131,6 +132,9 @@ struct plx_pci_card {
#define TEWS_PCI_VENDOR_ID 0x1498
#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
+#define CTI_PCI_VENDOR_ID 0x12c4
+#define CTI_PCI_DEVICE_ID_CRG001 0x0900
+
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon(struct pci_dev *pdev);
static void plx9056_pci_reset_common(struct pci_dev *pdev);
@@ -222,6 +226,14 @@ static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
/* based on PLX9030 */
};
+static struct plx_pci_card_info plx_pci_card_info_cti __devinitdata = {
+ "Connect Tech Inc. CANpro/104-Plus Opto (CRG001)", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030 */
+};
+
static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
{
/* Adlink PCI-7841/cPCI-7841 */
@@ -300,6 +312,13 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
0, 0,
(kernel_ulong_t)&plx_pci_card_info_tews
},
+ {
+ /* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_cti
+ },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index bd36e55..124e0dd 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -1,7 +1,7 @@
/*
- * CAN driver for esd CAN-USB/2
+ * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
*
- * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published
@@ -28,14 +28,16 @@
#include <linux/can/error.h>
MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
-MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 interfaces");
+MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
MODULE_LICENSE("GPL v2");
/* Define these values to match your devices */
#define USB_ESDGMBH_VENDOR_ID 0x0ab4
#define USB_CANUSB2_PRODUCT_ID 0x0010
+#define USB_CANUSBM_PRODUCT_ID 0x0011
#define ESD_USB2_CAN_CLOCK 60000000
+#define ESD_USBM_CAN_CLOCK 36000000
#define ESD_USB2_MAX_NETS 2
/* USB2 commands */
@@ -69,6 +71,7 @@ MODULE_LICENSE("GPL v2");
#define ESD_USB2_TSEG2_SHIFT 20
#define ESD_USB2_SJW_MAX 4
#define ESD_USB2_SJW_SHIFT 14
+#define ESD_USBM_SJW_SHIFT 24
#define ESD_USB2_BRP_MIN 1
#define ESD_USB2_BRP_MAX 1024
#define ESD_USB2_BRP_INC 1
@@ -183,6 +186,7 @@ struct __attribute__ ((packed)) esd_usb2_msg {
static struct usb_device_id esd_usb2_table[] = {
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
+ {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
{}
};
MODULE_DEVICE_TABLE(usb, esd_usb2_table);
@@ -889,11 +893,22 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
struct can_bittiming *bt = &priv->can.bittiming;
struct esd_usb2_msg msg;
u32 canbtr;
+ int sjw_shift;
canbtr = ESD_USB2_UBR;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ canbtr |= ESD_USB2_LOM;
+
canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
+
+ if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
+ USB_CANUSBM_PRODUCT_ID)
+ sjw_shift = ESD_USBM_SJW_SHIFT;
+ else
+ sjw_shift = ESD_USB2_SJW_SHIFT;
+
canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1))
- << ESD_USB2_SJW_SHIFT;
+ << sjw_shift;
canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
& (ESD_USB2_TSEG1_MAX - 1))
<< ESD_USB2_TSEG1_SHIFT;
@@ -971,12 +986,20 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
priv->index = index;
priv->can.state = CAN_STATE_STOPPED;
- priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ if (le16_to_cpu(dev->udev->descriptor.idProduct) ==
+ USB_CANUSBM_PRODUCT_ID)
+ priv->can.clock.freq = ESD_USBM_CAN_CLOCK;
+ else {
+ priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ }
+
priv->can.bittiming_const = &esd_usb2_bittiming_const;
priv->can.do_set_bittiming = esd_usb2_set_bittiming;
priv->can.do_set_mode = esd_usb2_set_mode;
priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index 49a30d3..e49c0ef 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -61,7 +61,7 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
- depends on BFIN_MAC && BF518
+ select PTP_1588_CLOCK
default y
---help---
To support the IEEE 1588 Precision Time Protocol (PTP), select y here
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index f816426..f1c458d 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,14 +548,17 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
return 0;
}
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_SYS_HARDWARE;
- info->phc_index = -1;
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = lp->phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -566,6 +569,7 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
return 0;
}
+#endif
static const struct ethtool_ops bfin_mac_ethtool_ops = {
.get_settings = bfin_mac_ethtool_getsettings,
@@ -574,7 +578,9 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
.get_wol = bfin_mac_ethtool_getwol,
.set_wol = bfin_mac_ethtool_setwol,
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
.get_ts_info = bfin_mac_ethtool_get_ts_info,
+#endif
};
/**************************************************************************/
@@ -649,6 +655,20 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
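+/* Pick a PHC resolution of 2^shift ns per counter unit, no finer than the
+ * input clock period, so that raw counter values convert to nanoseconds
+ * with a plain left shift by shift (see bfin_ptp_time_read/_write below).
+ */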
+static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
+{
+ u32 ipn = 1000000000UL / input_clk;
+ u32 ppn = 1;
+ unsigned int shift = 0;
+
+ while (ppn <= ipn) {
+ ppn <<= 1;
+ shift++;
+ }
+ *shift_result = shift;
+ return 1000000000UL / ppn;
+}
+
static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -798,19 +818,7 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
bfin_read_EMAC_PTP_TXSNAPLO();
bfin_read_EMAC_PTP_TXSNAPHI();
- /*
- * Set registers so that rollover occurs soon to test this.
- */
- bfin_write_EMAC_PTP_TIMELO(0x00000000);
- bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
-
SSYNC();
-
- lp->compare.last_update = 0;
- timecounter_init(&lp->clock,
- &lp->cycles,
- ktime_to_ns(ktime_get_real()));
- timecompare_update(&lp->compare, 0);
}
lp->stamp_cfg = config;
@@ -818,15 +826,6 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
-static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
-{
- ktime_t sys = ktime_get_real();
-
- pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
- __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
- sys.tv.nsec, cmp->offset, cmp->skew);
-}
-
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -857,15 +856,9 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
regval = bfin_read_EMAC_PTP_TXSNAPLO();
regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- ns = timecounter_cyc2time(&lp->clock,
- regval);
- timecompare_update(&lp->compare, ns);
+ ns = regval << lp->shift;
shhwtstamps.hwtstamp = ns_to_ktime(ns);
- shhwtstamps.syststamp =
- timecompare_transform(&lp->compare, ns);
skb_tstamp_tx(skb, &shhwtstamps);
-
- bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
}
}
}
@@ -888,55 +881,184 @@ static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
regval = bfin_read_EMAC_PTP_RXSNAPLO();
regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
- ns = timecounter_cyc2time(&lp->clock, regval);
- timecompare_update(&lp->compare, ns);
+ ns = regval << lp->shift;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(ns);
- shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+}
+
+static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u64 addend, ppb;
+ u32 input_clk, phc_clk;
+
+ /* Initialize hardware timer */
+ input_clk = get_sclk();
+ phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
+ addend = phc_clk * (1ULL << 32);
+ do_div(addend, input_clk);
+ bfin_write_EMAC_PTP_ADDEND((u32)addend);
+
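+ /* The nominal ADDEND, phc_clk * 2^32 / input_clk, scales linearly with
+ * the PHC rate. As it is programmed into a 32-bit register, the largest
+ * usable speed-up is roughly (input_clk / phc_clk - 1) * 1e9 ppb, which
+ * is what max_ppb below captures.
+ */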
+ lp->addend = addend;
+ ppb = 1000000000ULL * input_clk;
+ do_div(ppb, phc_clk);
+ lp->max_ppb = ppb - 1000000000ULL - 1ULL;
- bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
+ /* Initialize hwstamp config */
+ lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
}
-/*
- * bfin_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t bfin_read_clock(const struct cyclecounter *tc)
+static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
{
- u64 stamp;
+ u64 ns;
+ u32 lo, hi;
+
+ lo = bfin_read_EMAC_PTP_TIMELO();
+ hi = bfin_read_EMAC_PTP_TIMEHI();
- stamp = bfin_read_EMAC_PTP_TIMELO();
- stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ ns <<= lp->shift;
- return stamp;
+ return ns;
}
-#define PTP_CLK 25000000
+static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
+{
+ u32 hi, lo;
-static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+ ns >>= lp->shift;
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+
+ bfin_write_EMAC_PTP_TIMELO(lo);
+ bfin_write_EMAC_PTP_TIMEHI(hi);
+}
+
+/* PTP Hardware Clock operations */
+
+static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, addend;
+ int neg_adj = 0;
+ struct bfin_mac_local *lp =
+ container_of(ptp, struct bfin_mac_local, caps);
+
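+ /* Scale the nominal addend by (1 +/- ppb / 1e9); since the PHC rate is
+ * proportional to the programmed ADDEND (see bfin_mac_hwtstamp_init),
+ * this adjusts the clock frequency by ppb parts per billion.
+ */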
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ addend = lp->addend;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ addend = neg_adj ? addend - diff : addend + diff;
+
+ bfin_write_EMAC_PTP_ADDEND(addend);
+
+ return 0;
+}
+
+static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct bfin_mac_local *lp =
+ container_of(ptp, struct bfin_mac_local, caps);
+
+ spin_lock_irqsave(&lp->phc_lock, flags);
+
+ now = bfin_ptp_time_read(lp);
+ now += delta;
+ bfin_ptp_time_write(lp, now);
+
+ spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+ return 0;
+}
+
+static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct bfin_mac_local *lp =
+ container_of(ptp, struct bfin_mac_local, caps);
+
+ spin_lock_irqsave(&lp->phc_lock, flags);
+
+ ns = bfin_ptp_time_read(lp);
+
+ spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+ return 0;
+}
+
+static int bfin_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct bfin_mac_local *lp =
+ container_of(ptp, struct bfin_mac_local, caps);
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&lp->phc_lock, flags);
+
+ bfin_ptp_time_write(lp, ns);
+
+ spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+ return 0;
+}
+
+static int bfin_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info bfin_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "BF518 clock",
+ .max_adj = 0,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = bfin_ptp_adjfreq,
+ .adjtime = bfin_ptp_adjtime,
+ .gettime = bfin_ptp_gettime,
+ .settime = bfin_ptp_settime,
+ .enable = bfin_ptp_enable,
+};
+
+static int bfin_phc_init(struct net_device *netdev, struct device *dev)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
- u64 append;
- /* Initialize hardware timer */
- append = PTP_CLK * (1ULL << 32);
- do_div(append, get_sclk());
- bfin_write_EMAC_PTP_ADDEND((u32)append);
-
- memset(&lp->cycles, 0, sizeof(lp->cycles));
- lp->cycles.read = bfin_read_clock;
- lp->cycles.mask = CLOCKSOURCE_MASK(64);
- lp->cycles.mult = 1000000000 / PTP_CLK;
- lp->cycles.shift = 0;
-
- /* Synchronize our NIC clock against system wall clock */
- memset(&lp->compare, 0, sizeof(lp->compare));
- lp->compare.source = &lp->clock;
- lp->compare.target = ktime_get_real;
- lp->compare.num_samples = 10;
+ lp->caps = bfin_ptp_caps;
+ lp->caps.max_adj = lp->max_ppb;
+ lp->clock = ptp_clock_register(&lp->caps, dev);
+ if (IS_ERR(lp->clock))
+ return PTR_ERR(lp->clock);
- /* Initialize hwstamp config */
- lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+ lp->phc_index = ptp_clock_index(lp->clock);
+ spin_lock_init(&lp->phc_lock);
+
+ return 0;
+}
+
+static void bfin_phc_release(struct bfin_mac_local *lp)
+{
+ ptp_clock_unregister(lp->clock);
}
#else
@@ -945,6 +1067,8 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
+# define bfin_phc_init(netdev, dev) 0
+# define bfin_phc_release(lp)
#endif
static inline void _tx_reclaim_skb(void)
@@ -1579,12 +1703,17 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
}
bfin_mac_hwtstamp_init(ndev);
+ if (bfin_phc_init(ndev, &pdev->dev)) {
+ dev_err(&pdev->dev, "Cannot register PHC device!\n");
+ goto out_err_phc;
+ }
/* now, print out the card info, in a short format.. */
netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
return 0;
+out_err_phc:
out_err_reg_ndev:
free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
@@ -1603,6 +1732,8 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct bfin_mac_local *lp = netdev_priv(ndev);
+ bfin_phc_release(lp);
+
platform_set_drvdata(pdev, NULL);
lp->mii_bus->priv = NULL;
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 960905c..7a07ee0 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -11,8 +11,7 @@
#define _BFIN_MAC_H_
#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/timecompare.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/timer.h>
#include <linux/etherdevice.h>
#include <linux/bfin_mac.h>
@@ -94,10 +93,14 @@ struct bfin_mac_local {
struct mii_bus *mii_bus;
#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
- struct cyclecounter cycles;
- struct timecounter clock;
- struct timecompare compare;
+ u32 addend;
+ unsigned int shift;
+ s32 max_ppb;
struct hwtstamp_config stamp_cfg;
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ int phc_index;
+ spinlock_t phc_lock; /* protects time lo/hi registers */
#endif
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 72897c4..de121cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -34,18 +34,10 @@
#include "bnx2x_hsi.h"
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
-#define BCM_CNIC 1
#include "../cnic_if.h"
-#endif
-#ifdef BCM_CNIC
-#define BNX2X_MIN_MSIX_VEC_CNT 3
-#define BNX2X_MSIX_VEC_FP_START 2
-#else
-#define BNX2X_MIN_MSIX_VEC_CNT 2
-#define BNX2X_MSIX_VEC_FP_START 1
-#endif
+
+#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
#include <linux/mdio.h>
@@ -256,15 +248,10 @@ enum {
/* FCoE L2 */
#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
-/** Additional rings budgeting */
-#ifdef BCM_CNIC
-#define CNIC_PRESENT 1
-#define FCOE_PRESENT 1
-#else
-#define CNIC_PRESENT 0
-#define FCOE_PRESENT 0
-#endif /* BCM_CNIC */
-#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
+#define CNIC_SUPPORT(bp) ((bp)->cnic_support)
+#define CNIC_ENABLED(bp) ((bp)->cnic_enabled)
+#define CNIC_LOADED(bp) ((bp)->cnic_loaded)
+#define FCOE_INIT(bp) ((bp)->fcoe_init)
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -297,9 +284,7 @@ enum {
OOO_TXQ_IDX_OFFSET,
};
#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
-#ifdef BCM_CNIC
#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
-#endif
/* fast path */
/*
@@ -585,15 +570,9 @@ struct bnx2x_fastpath {
->var)
-#define IS_ETH_FP(fp) (fp->index < \
- BNX2X_NUM_ETH_QUEUES(fp->bp))
-#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
-#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
-#else
-#define IS_FCOE_FP(fp) false
-#define IS_FCOE_IDX(idx) false
-#endif
+#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
+#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
/* MC hsi */
@@ -886,6 +865,18 @@ struct bnx2x_common {
(CHIP_REV(bp) == CHIP_REV_Bx))
#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
(CHIP_REV(bp) == CHIP_REV_Ax))
+/* This define is used in two main places:
+ * 1. In the early stages of nic_load, to know whether to configure Parser / Searcher
+ * to nic-only mode or to offload mode. Offload mode is configured if either the
+ * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
+ * registered for this port (which means that the user wants storage services).
+ * 2. During cnic-related load, to know if offload mode is already configured in
+ * the HW or needs to be configured.
+ * Since the transition from nic-mode to offload-mode in HW causes traffic
+ * corruption, nic-mode is configured only on ports on which storage services
+ * were never requested.
+ */
+#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
int flash_size;
#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
@@ -1003,18 +994,15 @@ union cdu_context {
#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
-#ifdef BCM_CNIC
#define CNIC_ISCSI_CID_MAX 256
#define CNIC_FCOE_CID_MAX 2048
#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
-#endif
#define QM_ILT_PAGE_SZ_HW 0
#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
#define QM_CID_ROUND 1024
-#ifdef BCM_CNIC
/* TM (timers) host DB constants */
#define TM_ILT_PAGE_SZ_HW 0
#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
@@ -1032,8 +1020,6 @@ union cdu_context {
#define SRC_T2_SZ SRC_ILT_SZ
#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
-#endif
-
#define MAX_DMAE_C 8
/* DMA memory not used in fastpath */
@@ -1227,7 +1213,6 @@ struct bnx2x {
struct bnx2x_sp_objs *sp_objs;
struct bnx2x_fp_stats *fp_stats;
struct bnx2x_fp_txdata *bnx2x_txq;
- int bnx2x_txq_size;
void __iomem *regview;
void __iomem *doorbells;
u16 db_size;
@@ -1350,6 +1335,15 @@ struct bnx2x {
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+ u8 cnic_support;
+ bool cnic_enabled;
+ bool cnic_loaded;
+
+ /* Flag that indicates that we can start looking for FCoE L2 queue
+ * completions in the default status block.
+ */
+ bool fcoe_init;
+
int pm_cap;
int mrrs;
@@ -1420,6 +1414,8 @@ struct bnx2x {
#define BNX2X_MAX_COS 3
#define BNX2X_MAX_TX_COS 2
int num_queues;
+ uint num_ethernet_queues;
+ uint num_cnic_queues;
int num_napi_queues;
int disable_tpa;
@@ -1433,6 +1429,7 @@ struct bnx2x {
u8 igu_dsb_id;
u8 igu_base_sb;
u8 igu_sb_cnt;
+ u8 min_msix_vec_cnt;
dma_addr_t def_status_blk_mapping;
@@ -1478,16 +1475,16 @@ struct bnx2x {
* Maximum supported number of RSS queues: number of IGU SBs minus one that goes
* to CNIC.
*/
-#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT)
+#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
/*
* Maximum CID count that might be required by the bnx2x:
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
- + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+ + 2 * CNIC_SUPPORT(bp))
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
- + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+ + 2 * CNIC_SUPPORT(bp))
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
@@ -1495,9 +1492,6 @@ struct bnx2x {
int dropless_fc;
-#ifdef BCM_CNIC
- u32 cnic_flags;
-#define BNX2X_CNIC_FLAG_MAC_SET 1
void *t2;
dma_addr_t t2_mapping;
struct cnic_ops __rcu *cnic_ops;
@@ -1518,7 +1512,6 @@ struct bnx2x {
/* Start index of the "special" (CNIC related) L2 cleints */
u8 cnic_base_cl_id;
-#endif
int dmae_ready;
/* used to synchronize dmae accesses */
@@ -1647,9 +1640,9 @@ struct bnx2x {
/* Tx queues may be less or equal to Rx queues */
extern int num_queues;
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
-#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
- NON_ETH_CONTEXT_USE)
+ (bp)->num_cnic_queues)
#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1689,6 +1682,13 @@ struct bnx2x_func_init_params {
u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
+#define for_each_cnic_queue(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_eth_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
@@ -1702,6 +1702,22 @@ struct bnx2x_func_init_params {
else
/* Skip forwarding FP */
+#define for_each_valid_rx_queue(bp, var) \
+ for ((var) = 0; \
+ (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
+ BNX2X_NUM_ETH_QUEUES(bp)); \
+ (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_rx_queue_cnic(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_rx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
if (skip_rx_queue(bp, var)) \
@@ -1709,6 +1725,22 @@ struct bnx2x_func_init_params {
else
/* Skip OOO FP */
+#define for_each_valid_tx_queue(bp, var) \
+ for ((var) = 0; \
+ (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
+ BNX2X_NUM_ETH_QUEUES(bp)); \
+ (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_tx_queue_cnic(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
if (skip_tx_queue(bp, var)) \
@@ -2179,7 +2211,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
#define BNX2X_MF_SD_PROTOCOL(bp) \
((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
-#ifdef BCM_CNIC
#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
(BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
@@ -2196,9 +2227,12 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
-#else
-#define IS_MF_FCOE_AFEX(bp) false
-#endif
+enum {
+ SWITCH_UPDATE,
+ AFEX_UPDATE,
+};
+
+#define NUM_MACS 8
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4833b6a..54d522d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1152,6 +1152,25 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
}
}
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_rx_queue_cnic(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ fp->rx_bd_cons = 0;
+
+ /* Activate BD ring */
+ /* Warning!
+ * this will generate an interrupt (to the TSTORM)
+ * must only be done after chip is initialized
+ */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
+ }
+}
+
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
@@ -1159,7 +1178,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
int i, j;
/* Allocate TPA resources */
- for_each_rx_queue(bp, j) {
+ for_each_eth_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
DP(NETIF_MSG_IFUP,
@@ -1217,7 +1236,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
- for_each_rx_queue(bp, j) {
+ for_each_eth_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
fp->rx_bd_cons = 0;
@@ -1244,29 +1263,45 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
-static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
- int i;
u8 cos;
+ struct bnx2x *bp = fp->bp;
- for_each_tx_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
- unsigned pkts_compl = 0, bytes_compl = 0;
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+ unsigned pkts_compl = 0, bytes_compl = 0;
- u16 sw_prod = txdata->tx_pkt_prod;
- u16 sw_cons = txdata->tx_pkt_cons;
+ u16 sw_prod = txdata->tx_pkt_prod;
+ u16 sw_cons = txdata->tx_pkt_cons;
- while (sw_cons != sw_prod) {
- bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
- &pkts_compl, &bytes_compl);
- sw_cons++;
- }
- netdev_tx_reset_queue(
- netdev_get_tx_queue(bp->dev,
- txdata->txq_index));
+ while (sw_cons != sw_prod) {
+ bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+ &pkts_compl, &bytes_compl);
+ sw_cons++;
}
+
+ netdev_tx_reset_queue(
+ netdev_get_tx_queue(bp->dev,
+ txdata->txq_index));
+ }
+}
+
+static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_tx_queue_cnic(bp, i) {
+ bnx2x_free_tx_skbs_queue(&bp->fp[i]);
+ }
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i) {
+ bnx2x_free_tx_skbs_queue(&bp->fp[i]);
}
}
@@ -1294,11 +1329,20 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
}
}
+static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_rx_queue_cnic(bp, j) {
+ bnx2x_free_rx_bds(&bp->fp[j]);
+ }
+}
+
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
int j;
- for_each_rx_queue(bp, j) {
+ for_each_eth_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
bnx2x_free_rx_bds(fp);
@@ -1308,6 +1352,12 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
}
}
+void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+{
+ bnx2x_free_tx_skbs_cnic(bp);
+ bnx2x_free_rx_skbs_cnic(bp);
+}
+
void bnx2x_free_skbs(struct bnx2x *bp)
{
bnx2x_free_tx_skbs(bp);
@@ -1347,11 +1397,12 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
bp->msix_table[offset].vector);
offset++;
-#ifdef BCM_CNIC
- if (nvecs == offset)
- return;
- offset++;
-#endif
+
+ if (CNIC_SUPPORT(bp)) {
+ if (nvecs == offset)
+ return;
+ offset++;
+ }
for_each_eth_queue(bp, i) {
if (nvecs == offset)
@@ -1368,7 +1419,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
if (bp->flags & USING_MSIX_FLAG &&
!(bp->flags & USING_SINGLE_MSIX_FLAG))
bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
- CNIC_PRESENT + 1);
+ CNIC_SUPPORT(bp) + 1);
else
free_irq(bp->dev->irq, bp->dev);
}
@@ -1382,12 +1433,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->msix_table[0].entry);
msix_vec++;
-#ifdef BCM_CNIC
- bp->msix_table[msix_vec].entry = msix_vec;
- BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
- bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
- msix_vec++;
-#endif
+	/* CNIC requires an MSI-X vector for itself */
+ if (CNIC_SUPPORT(bp)) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
+ msix_vec, bp->msix_table[msix_vec].entry);
+ msix_vec++;
+ }
+
/* We need separate vectors for ETH queues only (not FCoE) */
for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
@@ -1396,7 +1449,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
msix_vec++;
}
- req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
+ req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
@@ -1404,7 +1457,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
- if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+ if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
/* how less vectors we will have? */
int diff = req_cnt - rc;
@@ -1419,7 +1472,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
/*
* decrease number of queues by number of unallocated entries
*/
- bp->num_queues -= diff;
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("New queue configuration set: %d\n",
bp->num_queues);
@@ -1435,6 +1489,9 @@ int bnx2x_enable_msix(struct bnx2x *bp)
BNX2X_DEV_INFO("Using single MSI-X vector\n");
bp->flags |= USING_SINGLE_MSIX_FLAG;
+ BNX2X_DEV_INFO("set number of queues to 1\n");
+ bp->num_ethernet_queues = 1;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
@@ -1464,9 +1521,9 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
return -EBUSY;
}
-#ifdef BCM_CNIC
- offset++;
-#endif
+ if (CNIC_SUPPORT(bp))
+ offset++;
+
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1485,7 +1542,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_ETH_QUEUES(bp);
- offset = 1 + CNIC_PRESENT;
+ offset = 1 + CNIC_SUPPORT(bp);
netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
bp->msix_table[0].vector,
0, bp->msix_table[offset].vector,
@@ -1556,19 +1613,35 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
return 0;
}
+static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i)
+ napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
static void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_eth_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
}
+static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i)
+ napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_eth_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}
@@ -1576,6 +1649,8 @@ void bnx2x_netif_start(struct bnx2x *bp)
{
if (netif_running(bp->dev)) {
bnx2x_napi_enable(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_napi_enable_cnic(bp);
bnx2x_int_enable(bp);
if (bp->state == BNX2X_STATE_OPEN)
netif_tx_wake_all_queues(bp->dev);
@@ -1586,14 +1661,15 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
bnx2x_int_disable_sync(bp, disable_hw);
bnx2x_napi_disable(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_napi_disable_cnic(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct bnx2x *bp = netdev_priv(dev);
-#ifdef BCM_CNIC
- if (!NO_FCOE(bp)) {
+ if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
struct ethhdr *hdr = (struct ethhdr *)skb->data;
u16 ether_type = ntohs(hdr->h_proto);
@@ -1609,7 +1685,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
return bnx2x_fcoe_tx(bp, txq_index);
}
-#endif
+
/* select a non-FCoE queue */
return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
@@ -1618,15 +1694,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
void bnx2x_set_num_queues(struct bnx2x *bp)
{
/* RSS queues */
- bp->num_queues = bnx2x_calc_num_queues(bp);
+ bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
-#ifdef BCM_CNIC
/* override in STORAGE SD modes */
if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
- bp->num_queues = 1;
-#endif
+ bp->num_ethernet_queues = 1;
+
/* Add special queues */
- bp->num_queues += NON_ETH_CONTEXT_USE;
+ bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
@@ -1653,20 +1729,18 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
* bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
* will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
*/
-static int bnx2x_set_real_num_queues(struct bnx2x *bp)
+static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
int rc, tx, rx;
tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
- rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
+ rx = BNX2X_NUM_ETH_QUEUES(bp);
/* account for fcoe queue */
-#ifdef BCM_CNIC
- if (!NO_FCOE(bp)) {
- rx += FCOE_PRESENT;
- tx += FCOE_PRESENT;
+ if (include_cnic && !NO_FCOE(bp)) {
+ rx++;
+ tx++;
}
-#endif
rc = netif_set_real_num_tx_queues(bp->dev, tx);
if (rc) {
@@ -1859,14 +1933,26 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
(bp)->state = BNX2X_STATE_ERROR; \
goto label; \
} while (0)
-#else
+
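+/* Same as LOAD_ERROR_EXIT, but only unwinds the CNIC part of the load */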
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+ do { \
+ bp->cnic_loaded = false; \
+ goto label; \
+ } while (0)
+#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
do { \
(bp)->state = BNX2X_STATE_ERROR; \
(bp)->panic = 1; \
return -EBUSY; \
} while (0)
-#endif
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+ do { \
+ bp->cnic_loaded = false; \
+ (bp)->panic = 1; \
+ return -EBUSY; \
+ } while (0)
+#endif /*BNX2X_STOP_ON_ERROR*/
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
{
@@ -1959,10 +2045,8 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
fp->max_cos = 1;
/* Init txdata pointers */
-#ifdef BCM_CNIC
if (IS_FCOE_FP(fp))
fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
-#endif
if (IS_ETH_FP(fp))
for_each_cos_in_tx_queue(fp, cos)
fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
@@ -1980,11 +2064,95 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
else if (bp->flags & GRO_ENABLE_FLAG)
fp->mode = TPA_MODE_GRO;
-#ifdef BCM_CNIC
/* We don't want TPA on an FCoE L2 ring */
if (IS_FCOE_FP(fp))
fp->disable_tpa = 1;
-#endif
+}
+
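+/* Second stage of the load flow: bring up the CNIC (FCoE/iSCSI) resources on
+ * top of an already running ETH driver: memory, extra queues, NAPI, HW init
+ * and Rx filters.
+ */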
+int bnx2x_load_cnic(struct bnx2x *bp)
+{
+ int i, rc, port = BP_PORT(bp);
+
+ DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
+
+ mutex_init(&bp->cnic_mutex);
+
+ rc = bnx2x_alloc_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+
+ rc = bnx2x_alloc_fp_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate memory for cnic fps\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+
+ /* Update the number of queues with the cnic queues */
+ rc = bnx2x_set_real_num_queues(bp, 1);
+ if (rc) {
+ BNX2X_ERR("Unable to set real_num_queues including cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+
+ /* Add all CNIC NAPI objects */
+ bnx2x_add_all_napi_cnic(bp);
+ DP(NETIF_MSG_IFUP, "cnic napi added\n");
+ bnx2x_napi_enable_cnic(bp);
+
+ rc = bnx2x_init_hw_func_cnic(bp);
+ if (rc)
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
+
+ bnx2x_nic_init_cnic(bp);
+
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+
+ for_each_cnic_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ }
+ }
+
+ /* Initialize Rx filter. */
+ netif_addr_lock_bh(bp->dev);
+ bnx2x_set_rx_mode(bp->dev);
+ netif_addr_unlock_bh(bp->dev);
+
+ /* re-read iscsi info */
+ bnx2x_get_iscsi_info(bp);
+ bnx2x_setup_cnic_irq_info(bp);
+ bnx2x_setup_cnic_info(bp);
+ bp->cnic_loaded = true;
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+
+	DP(NETIF_MSG_IFUP, "CNIC-related load finished successfully\n");
+
+ return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error_cnic2:
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+
+load_error_cnic1:
+ bnx2x_napi_disable_cnic(bp);
+ /* Update the number of queues without the cnic queues */
+ rc = bnx2x_set_real_num_queues(bp, 0);
+ if (rc)
+ BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
+load_error_cnic0:
+ BNX2X_ERR("CNIC-related load failed\n");
+ bnx2x_free_fp_mem_cnic(bp);
+ bnx2x_free_mem_cnic(bp);
+ return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
}
@@ -1995,6 +2163,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
u32 load_code;
int i, rc;
+ DP(NETIF_MSG_IFUP, "Starting NIC load\n");
+ DP(NETIF_MSG_IFUP,
+ "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
+
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic)) {
BNX2X_ERR("Can't load NIC when there is panic\n");
@@ -2022,9 +2194,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
- memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
- sizeof(struct bnx2x_fp_txdata));
+ memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
+ bp->num_cnic_queues) *
+ sizeof(struct bnx2x_fp_txdata));
+ bp->fcoe_init = false;
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
@@ -2034,9 +2208,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* As long as bnx2x_alloc_mem() may possibly update
* bp->num_queues, bnx2x_set_real_num_queues() should always
- * come after it.
+ * come after it. At this stage cnic queues are not counted.
*/
- rc = bnx2x_set_real_num_queues(bp);
+ rc = bnx2x_set_real_num_queues(bp, 0);
if (rc) {
BNX2X_ERR("Unable to set real_num_queues\n");
LOAD_ERROR_EXIT(bp, load_error0);
@@ -2050,6 +2224,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Add all NAPI objects */
bnx2x_add_all_napi(bp);
+ DP(NETIF_MSG_IFUP, "napi added\n");
bnx2x_napi_enable(bp);
/* set pf load just before approaching the MCP */
@@ -2191,23 +2366,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error3);
}
-#ifdef BCM_CNIC
- /* Enable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
-#endif
-
- for_each_nondefault_queue(bp, i) {
+ for_each_nondefault_eth_queue(bp, i) {
rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
if (rc) {
BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error4);
+ LOAD_ERROR_EXIT(bp, load_error3);
}
}
rc = bnx2x_init_rss_pf(bp);
if (rc) {
BNX2X_ERR("PF RSS init failed\n");
- LOAD_ERROR_EXIT(bp, load_error4);
+ LOAD_ERROR_EXIT(bp, load_error3);
}
/* Now when Clients are configured we are ready to work */
@@ -2217,7 +2387,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_set_eth_mac(bp, true);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
- LOAD_ERROR_EXIT(bp, load_error4);
+ LOAD_ERROR_EXIT(bp, load_error3);
}
if (bp->pending_max) {
@@ -2264,14 +2434,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* start the timer */
mod_timer(&bp->timer, jiffies + bp->current_interval);
-#ifdef BCM_CNIC
- /* re-read iscsi info */
- bnx2x_get_iscsi_info(bp);
- bnx2x_setup_cnic_irq_info(bp);
- bnx2x_setup_cnic_info(bp);
- if (bp->state == BNX2X_STATE_OPEN)
- bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
-#endif
+ if (CNIC_ENABLED(bp))
+ bnx2x_load_cnic(bp);
/* mark driver is loaded in shmem2 */
if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
@@ -2293,14 +2457,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
bnx2x_dcbx_init(bp, false);
+	DP(NETIF_MSG_IFUP, "NIC load finished successfully\n");
+
return 0;
#ifndef BNX2X_STOP_ON_ERROR
-load_error4:
-#ifdef BCM_CNIC
- /* Disable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-#endif
load_error3:
bnx2x_int_disable_sync(bp, 1);
@@ -2338,6 +2499,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
int i;
bool global = false;
+ DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+
/* mark driver is unloaded in shmem2 */
if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
@@ -2373,14 +2536,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
smp_mb();
+ if (CNIC_LOADED(bp))
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
/* Stop Tx */
bnx2x_tx_disable(bp);
netdev_reset_tc(bp->dev);
-#ifdef BCM_CNIC
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
-
bp->rx_mode = BNX2X_RX_MODE_NONE;
del_timer_sync(&bp->timer);
@@ -2414,7 +2576,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
-
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
@@ -2435,12 +2598,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_free_skbs_cnic(bp);
for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ if (CNIC_LOADED(bp)) {
+ bnx2x_free_fp_mem_cnic(bp);
+ bnx2x_free_mem_cnic(bp);
+ }
bnx2x_free_mem(bp);
bp->state = BNX2X_STATE_CLOSED;
+ bp->cnic_loaded = false;
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
@@ -2460,6 +2630,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
+ DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
+
return 0;
}
@@ -2550,7 +2722,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-#ifdef BCM_CNIC
+
/* No need to update SB for FCoE L2 ring as long as
* it's connected to the default SB and the SB
* has been updated when NAPI was scheduled.
@@ -2559,8 +2731,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
break;
}
-#endif
-
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
* thus we need to ensure that status block indices
@@ -2940,7 +3110,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq_index = skb_get_queue_mapping(skb);
txq = netdev_get_tx_queue(dev, txq_index);
- BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
+ BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
txdata = &bp->bnx2x_txq[txq_index];
@@ -3339,13 +3509,11 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return -EINVAL;
}
-#ifdef BCM_CNIC
if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
!is_zero_ether_addr(addr->sa_data)) {
BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
return -EINVAL;
}
-#endif
if (netif_running(dev)) {
rc = bnx2x_set_eth_mac(bp, false);
@@ -3369,13 +3537,11 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
u8 cos;
/* Common */
-#ifdef BCM_CNIC
+
if (IS_FCOE_IDX(fp_index)) {
memset(sb, 0, sizeof(union host_hc_status_block));
fp->status_blk_mapping = 0;
-
} else {
-#endif
/* status blocks */
if (!CHIP_IS_E1x(bp))
BNX2X_PCI_FREE(sb->e2_sb,
@@ -3387,9 +3553,8 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
bnx2x_fp(bp, fp_index,
status_blk_mapping),
sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
}
-#endif
+
/* Rx */
if (!skip_rx_queue(bp, fp_index)) {
bnx2x_free_rx_bds(fp);
@@ -3431,10 +3596,17 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
/* end of fastpath */
}
+void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+{
+ int i;
+ for_each_cnic_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
bnx2x_free_fp_mem_at(bp, i);
}
@@ -3519,14 +3691,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
u8 cos;
int rx_ring_size = 0;
-#ifdef BCM_CNIC
if (!bp->rx_ring_size &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size;
- } else
-#endif
- if (!bp->rx_ring_size) {
+ } else if (!bp->rx_ring_size) {
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
if (CHIP_IS_E3(bp)) {
@@ -3550,9 +3719,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
-#ifdef BCM_CNIC
+
if (!IS_FCOE_IDX(index)) {
-#endif
/* status blocks */
if (!CHIP_IS_E1x(bp))
BNX2X_PCI_ALLOC(sb->e2_sb,
@@ -3562,9 +3730,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
BNX2X_PCI_ALLOC(sb->e1x_sb,
&bnx2x_fp(bp, index, status_blk_mapping),
sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
}
-#endif
/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
* set shortcuts for it.
@@ -3641,31 +3807,31 @@ alloc_mem_err:
return 0;
}
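+/* Allocate fastpath memory for the CNIC (FCoE) queue; a failure here fails the CNIC load */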
+int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+{
+ if (!NO_FCOE(bp))
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
+		/* we will fail the load process instead of marking
+		 * NO_FCOE_FLAG
+		 */
+ return -ENOMEM;
+
+ return 0;
+}
+
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
int i;
- /**
- * 1. Allocate FP for leading - fatal if error
- * 2. {CNIC} Allocate FCoE FP - fatal if error
- * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
- * 4. Allocate RSS - fix number of queues if error
+ /* 1. Allocate FP for leading - fatal if error
+ * 2. Allocate RSS - fix number of queues if error
*/
/* leading */
if (bnx2x_alloc_fp_mem_at(bp, 0))
return -ENOMEM;
-#ifdef BCM_CNIC
- if (!NO_FCOE(bp))
- /* FCoE */
- if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
- /* we will fail load process instead of mark
- * NO_FCOE_FLAG
- */
- return -ENOMEM;
-#endif
-
/* RSS */
for_each_nondefault_eth_queue(bp, i)
if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -3676,17 +3842,17 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
WARN_ON(delta < 0);
-#ifdef BCM_CNIC
- /**
- * move non eth FPs next to last eth FP
- * must be done in that order
- * FCOE_IDX < FWD_IDX < OOO_IDX
- */
+ if (CNIC_SUPPORT(bp))
+ /* move non eth FPs next to last eth FP
+ * must be done in that order
+ * FCOE_IDX < FWD_IDX < OOO_IDX
+ */
- /* move FCoE fp even NO_FCOE_FLAG is on */
- bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
-#endif
- bp->num_queues -= delta;
+	/* move FCoE fp even when NO_FCOE_FLAG is on */
+ bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
+ bp->num_ethernet_queues -= delta;
+ bp->num_queues = bp->num_ethernet_queues +
+ bp->num_cnic_queues;
BNX2X_ERR("Adjusted num of queues from %d to %d\n",
bp->num_queues + delta, bp->num_queues);
}
@@ -3711,7 +3877,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
int msix_table_size = 0;
- int fp_array_size;
+ int fp_array_size, txq_array_size;
int i;
/*
@@ -3721,7 +3887,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
@@ -3750,12 +3916,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
goto alloc_err;
/* Allocate memory for the transmission queues array */
- bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
-#ifdef BCM_CNIC
- bp->bnx2x_txq_size++;
-#endif
- bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
- sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+ txq_array_size =
+ BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
+ BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
+
+ bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
+ GFP_KERNEL);
if (!bp->bnx2x_txq)
goto alloc_err;
@@ -3838,7 +4004,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
return LINK_CONFIG_IDX(sel_phy_idx);
}
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 9c5ea6c..ad28074 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -238,7 +238,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
* @dev_instance: private instance
*/
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
-#ifdef BCM_CNIC
/**
* bnx2x_cnic_notify - send command to cnic driver
@@ -262,8 +261,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
*/
void bnx2x_setup_cnic_info(struct bnx2x *bp);
-#endif
-
/**
* bnx2x_int_enable - enable HW interrupts.
*
@@ -283,7 +280,7 @@ void bnx2x_int_enable(struct bnx2x *bp);
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
- * bnx2x_nic_init - init driver internals.
+ * bnx2x_nic_init_cnic - init driver internals for cnic.
*
* @bp: driver handle
* @load_code: COMMON, PORT or FUNCTION
@@ -293,9 +290,26 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
* - status blocks
* - etc.
*/
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+void bnx2x_nic_init_cnic(struct bnx2x *bp);
/**
+ * bnx2x_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ *
+ * Initializes:
+ * - rings
+ * - status blocks
+ * - etc.
+ */
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+/**
+ * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
+ *
+ * @bp: driver handle
+ */
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
+/**
* bnx2x_alloc_mem - allocate driver's memory.
*
* @bp: driver handle
@@ -303,6 +317,12 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
int bnx2x_alloc_mem(struct bnx2x *bp);
/**
+ * bnx2x_free_mem_cnic - release driver's memory for cnic.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_free_mem_cnic(struct bnx2x *bp);
+/**
* bnx2x_free_mem - release driver's memory.
*
* @bp: driver handle
@@ -407,6 +427,7 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
/**
* bnx2x_sp_event - handle ramrods completion.
@@ -424,6 +445,14 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
void bnx2x_ilt_set_info(struct bnx2x *bp);
/**
+ * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC
+ * and TM.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
+
+/**
* bnx2x_dcbx_init - initialize dcbx protocol.
*
* @bp: driver handle
@@ -491,12 +520,17 @@ int bnx2x_resume(struct pci_dev *pdev);
/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);
+void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
+int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
+void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
+int bnx2x_load_cnic(struct bnx2x *bp);
/**
* bnx2x_enable_msix - set msix configuration.
@@ -547,7 +581,7 @@ void bnx2x_free_mem_bp(struct bnx2x *bp);
*/
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
/**
* bnx2x_fcoe_get_wwn - return the requested WWN value for this port
*
@@ -793,23 +827,39 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0;
}
-static inline void bnx2x_add_all_napi(struct bnx2x *bp)
+static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
int i;
- bp->num_napi_queues = bp->num_queues;
+ /* Add NAPI objects */
+ for_each_rx_queue_cnic(bp, i)
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, BNX2X_NAPI_WEIGHT);
+}
+
+static inline void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
/* Add NAPI objects */
- for_each_rx_queue(bp, i)
+ for_each_eth_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
+static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i)
+ netif_napi_del(&bnx2x_fp(bp, i, napi));
+}
+
static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_eth_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
@@ -979,11 +1029,9 @@ static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
struct bnx2x *bp = fp->bp;
if (!CHIP_IS_E1x(bp)) {
-#ifdef BCM_CNIC
/* there are special statistics counters for FCoE 136..140 */
if (IS_FCOE_FP(fp))
return bp->cnic_base_cl_id + (bp->pf_num >> 1);
-#endif
return fp->cl_id;
}
return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
@@ -1102,7 +1150,6 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
txdata->cid, txdata->txq_index);
}
-#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
return bp->cnic_base_cl_id + cl_idx +
@@ -1162,7 +1209,6 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
fp->igu_sb_id);
}
-#endif
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata)
@@ -1280,7 +1326,7 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
*/
return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
-#ifdef BCM_CNIC
+
/**
* bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
*
@@ -1288,7 +1334,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
*
*/
void bnx2x_get_iscsi_info(struct bnx2x *bp);
-#endif
/**
* bnx2x_link_sync_notify - send notification to other functions.
@@ -1340,13 +1385,11 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
- if (is_valid_ether_addr(addr))
+ if (is_valid_ether_addr(addr) ||
+ (is_zero_ether_addr(addr) &&
+ (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
return true;
-#ifdef BCM_CNIC
- if (is_zero_ether_addr(addr) &&
- (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
- return true;
-#endif
+
return false;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2245c38..cba4a16 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1908,10 +1908,10 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
/* first the HW mac address */
memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
-#ifdef BCM_CNIC
- /* second SAN address */
- memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
-#endif
+ if (CNIC_LOADED(bp))
+ /* second SAN address */
+ memcpy(perm_addr+netdev->addr_len, bp->fip_mac,
+ netdev->addr_len);
}
static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 6e5bdd1..c40c025 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2901,7 +2901,9 @@ static void bnx2x_get_channels(struct net_device *dev,
static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
{
bnx2x_disable_msi(bp);
- BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+ bp->num_ethernet_queues = num_rss;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
bnx2x_set_int_mode(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 1870492..7eaa74b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -4845,9 +4845,17 @@ struct vif_list_event_data {
__le32 reserved2;
};
-/*
- * union for all event ring message types
- */
+/* function update event data */
+struct function_update_event_data {
+ u8 echo;
+ u8 reserved;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+
+/* union for all event ring message types */
union event_data {
struct vf_pf_event_data vf_pf_event;
struct eth_event_data eth_event;
@@ -4855,6 +4863,7 @@ union event_data {
struct vf_flr_event_data vf_flr_event;
struct malicious_vf_event_data malicious_vf_event;
struct vif_list_event_data vif_list_event;
+ struct function_update_event_data function_update_event;
};
@@ -4984,8 +4993,10 @@ struct function_update_data {
u8 allowed_priorities;
u8 network_cos_mode;
u8 lb_mode_en;
- u8 reserved0;
- __le32 reserved1;
+ u8 tx_switch_suspend_change_flg;
+ u8 tx_switch_suspend;
+ u8 echo;
+ __le16 reserved1;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index fe66d90..d755acf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -648,15 +648,25 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
return rc;
}
+static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
+{
+ int rc = 0;
+
+ if (CONFIGURE_NIC_MODE(bp))
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
+
+ return rc;
+}
+
static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
- if (!rc)
+ if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
- if (!rc)
- rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
return rc;
}
@@ -781,12 +791,19 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
+static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
+{
+ if (CONFIGURE_NIC_MODE(bp))
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+}
+
static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
- bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
- bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+ if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
}
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
@@ -890,7 +907,6 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
/****************************************************************************
* SRC initializations
****************************************************************************/
-#ifdef BCM_CNIC
/* called during init func stage */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
@@ -915,5 +931,4 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
U64_HI((u64)t2_mapping +
(src_cid_count-1) * sizeof(struct src_ent)));
}
-#endif
#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index f6cfdc6..c98da25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -11934,8 +11934,8 @@ int bnx2x_phy_probe(struct link_params *params)
return 0;
}
-void bnx2x_init_bmac_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_bmac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -11954,8 +11954,8 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
-void bnx2x_init_emac_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_emac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -11973,8 +11973,8 @@ void bnx2x_init_emac_loopback(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
-void bnx2x_init_xmac_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_xmac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -11999,8 +11999,8 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
-void bnx2x_init_umac_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_umac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -12014,8 +12014,8 @@ void bnx2x_init_umac_loopback(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
-void bnx2x_init_xgxs_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_xgxs_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -12066,7 +12066,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
}
-static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+void bnx2x_set_rx_filter(struct link_params *params, u8 en)
{
struct bnx2x *bp = params->bp;
u8 val = en * 0x1F;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 9165b89..ba981ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -432,7 +432,8 @@ int bnx2x_phy_probe(struct link_params *params);
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
-
+/* Open / close the gate between the NIG and the BRB */
+void bnx2x_set_rx_filter(struct link_params *params, u8 en);
/* DCBX structs */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index bd1fd3d..3519fed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -791,10 +791,9 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* host sb data */
-#ifdef BCM_CNIC
if (IS_FCOE_FP(fp))
continue;
-#endif
+
BNX2X_ERR(" run indexes (");
for (j = 0; j < HC_SB_MAX_SM; j++)
pr_cont("0x%x%s",
@@ -859,7 +858,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
#ifdef BNX2X_STOP_ON_ERROR
/* Rings */
/* Rx */
- for_each_rx_queue(bp, i) {
+ for_each_valid_rx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -893,7 +892,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
/* Tx */
- for_each_tx_queue(bp, i) {
+ for_each_valid_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
@@ -1483,7 +1482,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
@@ -1504,9 +1503,8 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
if (msix) {
synchronize_irq(bp->msix_table[0].vector);
offset = 1;
-#ifdef BCM_CNIC
- offset++;
-#endif
+ if (CNIC_SUPPORT(bp))
+ offset++;
for_each_eth_queue(bp, i)
synchronize_irq(bp->msix_table[offset++].vector);
} else
@@ -1588,9 +1586,8 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}
-#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
-#endif
+
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
@@ -1720,7 +1717,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- mask = 0x2 << (fp->index + CNIC_PRESENT);
+ mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
if (status & mask) {
/* Handle Rx or Tx according to SB id */
prefetch(fp->rx_cons_sb);
@@ -1732,22 +1729,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
}
}
-#ifdef BCM_CNIC
- mask = 0x2;
- if (status & (mask | 0x1)) {
- struct cnic_ops *c_ops = NULL;
+ if (CNIC_SUPPORT(bp)) {
+ mask = 0x2;
+ if (status & (mask | 0x1)) {
+ struct cnic_ops *c_ops = NULL;
- if (likely(bp->state == BNX2X_STATE_OPEN)) {
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops)
- c_ops->cnic_handler(bp->cnic_data, NULL);
- rcu_read_unlock();
- }
+ if (likely(bp->state == BNX2X_STATE_OPEN)) {
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data,
+ NULL);
+ rcu_read_unlock();
+ }
- status &= ~mask;
+ status &= ~mask;
+ }
}
-#endif
if (unlikely(status & 0x1)) {
queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -3075,11 +3073,13 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
{
-#ifdef BCM_CNIC
struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
struct fcoe_stats_info *fcoe_stat =
&bp->slowpath->drv_info_to_mcp.fcoe_stat;
+ if (!CNIC_LOADED(bp))
+ return;
+
memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
bp->fip_mac, ETH_ALEN);
@@ -3162,16 +3162,17 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
/* ask L5 driver to add data to the struct */
bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
-#endif
}
static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
{
-#ifdef BCM_CNIC
struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
struct iscsi_stats_info *iscsi_stat =
&bp->slowpath->drv_info_to_mcp.iscsi_stat;
+ if (!CNIC_LOADED(bp))
+ return;
+
memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
@@ -3180,7 +3181,6 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
/* ask L5 driver to add data to the struct */
bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
-#endif
}
/* called due to MCP event (on pmf):
@@ -4572,7 +4572,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
mmiowb(); /* keep prod updates ordered */
}
-#ifdef BCM_CNIC
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
union event_ring_elem *elem)
{
@@ -4594,7 +4593,6 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
}
-#endif
static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
{
@@ -4635,11 +4633,9 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
-#ifdef BCM_CNIC
- if (cid == BNX2X_ISCSI_ETH_CID(bp))
+ if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
else
-#endif
vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
@@ -4665,9 +4661,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
}
-#ifdef BCM_CNIC
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
-#endif
static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
{
@@ -4678,14 +4672,12 @@ static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
/* Send rx_mode command again if was requested */
if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
bnx2x_set_storm_rx_mode(bp);
-#ifdef BCM_CNIC
else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
&bp->sp_state))
bnx2x_set_iscsi_eth_rx_mode(bp, true);
else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
&bp->sp_state))
bnx2x_set_iscsi_eth_rx_mode(bp, false);
-#endif
netif_addr_unlock_bh(bp->dev);
}
@@ -4747,7 +4739,6 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}
-#ifdef BCM_CNIC
if (!NO_FCOE(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -4770,22 +4761,16 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
bnx2x_link_report(bp);
bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
}
-#else
- /* If no FCoE ring - ACK MCP now */
- bnx2x_link_report(bp);
- bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
-#endif /* BCM_CNIC */
}
static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
struct bnx2x *bp, u32 cid)
{
DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
-#ifdef BCM_CNIC
- if (cid == BNX2X_FCOE_ETH_CID(bp))
+
+ if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
return &bnx2x_fcoe_sp_obj(bp, q_obj);
else
-#endif
return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}
@@ -4793,6 +4778,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
{
u16 hw_cons, sw_cons, sw_prod;
union event_ring_elem *elem;
+ u8 echo;
u32 cid;
u8 opcode;
int spqe_cnt = 0;
@@ -4847,10 +4833,11 @@ static void bnx2x_eq_int(struct bnx2x *bp)
*/
DP(BNX2X_MSG_SP,
"got delete ramrod for MULTI[%d]\n", cid);
-#ifdef BCM_CNIC
- if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+
+ if (CNIC_LOADED(bp) &&
+ !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
goto next_spqe;
-#endif
+
q_obj = bnx2x_cid_to_q_obj(bp, cid);
if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
@@ -4875,21 +4862,34 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
goto next_spqe;
+
case EVENT_RING_OPCODE_FUNCTION_UPDATE:
- DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
- "AFEX: ramrod completed FUNCTION_UPDATE\n");
- f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
+ echo = elem->message.data.function_update_event.echo;
+ if (echo == SWITCH_UPDATE) {
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_SWITCH_UPDATE ramrod\n");
+ if (f_obj->complete_cmd(
+ bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
+ break;
- /* We will perform the Queues update from sp_rtnl task
- * as all Queue SP operations should run under
- * rtnl_lock.
- */
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
+ } else {
+ DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+ "AFEX: ramrod completed FUNCTION_UPDATE\n");
+ f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_AFEX_UPDATE);
+
+ /* We will perform the Queues update from
+ * sp_rtnl task as all Queue SP operations
+ * should run under rtnl_lock.
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
goto next_spqe;
case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
@@ -4999,11 +4999,10 @@ static void bnx2x_sp_task(struct work_struct *work)
/* SP events: STAT_QUERY and others */
if (status & BNX2X_DEF_SB_IDX) {
-#ifdef BCM_CNIC
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- if ((!NO_FCOE(bp)) &&
- (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ if (FCOE_INIT(bp) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
/*
* Prevent local bottom-halves from running as
* we are going to change the local NAPI list.
@@ -5012,7 +5011,7 @@ static void bnx2x_sp_task(struct work_struct *work)
napi_schedule(&bnx2x_fcoe(bp, napi));
local_bh_enable();
}
-#endif
+
/* Handle EQ completions */
bnx2x_eq_int(bp);
@@ -5050,8 +5049,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
return IRQ_HANDLED;
#endif
-#ifdef BCM_CNIC
- {
+ if (CNIC_LOADED(bp)) {
struct cnic_ops *c_ops;
rcu_read_lock();
@@ -5060,7 +5058,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
c_ops->cnic_handler(bp->cnic_data, NULL);
rcu_read_unlock();
}
-#endif
+
queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
return IRQ_HANDLED;
@@ -5498,12 +5496,10 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
unsigned long rx_mode_flags = 0, ramrod_flags = 0;
unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
-#ifdef BCM_CNIC
if (!NO_FCOE(bp))
/* Configure rx_mode of FCoE Queue */
__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-#endif
switch (bp->rx_mode) {
case BNX2X_RX_MODE_NONE:
@@ -5624,12 +5620,12 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
- return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+ return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
}
static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
- return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+ return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
}
static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
@@ -5720,23 +5716,25 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
txdata->tx_pkt = 0;
}
+static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_tx_queue_cnic(bp, i)
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
+}
static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
int i;
u8 cos;
- for_each_tx_queue(bp, i)
+ for_each_eth_queue(bp, i)
for_each_cos_in_tx_queue(&bp->fp[i], cos)
bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
- int i;
-
- for_each_eth_queue(bp, i)
- bnx2x_init_eth_fp(bp, i);
-#ifdef BCM_CNIC
if (!NO_FCOE(bp))
bnx2x_init_fcoe_fp(bp);
@@ -5744,8 +5742,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
BNX2X_VF_ID_INVALID, false,
bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
-#endif
+ /* ensure status block indices were read */
+ rmb();
+ bnx2x_init_rx_rings_cnic(bp);
+ bnx2x_init_tx_rings_cnic(bp);
+
+ /* flush all */
+ mb();
+ mmiowb();
+}
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_init_eth_fp(bp, i);
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
bp->common.shmem_base, bp->common.shmem2_base,
@@ -6031,10 +6043,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
msleep(50);
bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
-#ifndef BCM_CNIC
- /* set NIC mode */
- REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
+ if (!CNIC_SUPPORT(bp))
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
/* Enable inputs of parser neighbor blocks */
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
@@ -6522,9 +6533,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
REG_WR(bp, QM_REG_SOFT_RESET, 1);
REG_WR(bp, QM_REG_SOFT_RESET, 0);
-#ifdef BCM_CNIC
- bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
-#endif
+ if (CNIC_SUPPORT(bp))
+ bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
@@ -6611,18 +6621,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
-#ifdef BCM_CNIC
- REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
- REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
- REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
- REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
- REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
- REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
- REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
- REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
- REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
- REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
-#endif
+ if (CNIC_SUPPORT(bp)) {
+ REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+ REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+ REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+ REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+ REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+ REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+ REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+ REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+ }
REG_WR(bp, SRC_REG_SOFT_RST, 0);
if (sizeof(union cdu_context) != 1024)
@@ -6786,11 +6796,11 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
/* QM cid (connection) count */
bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
-#ifdef BCM_CNIC
- bnx2x_init_block(bp, BLOCK_TM, init_phase);
- REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
- REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
-#endif
+ if (CNIC_SUPPORT(bp)) {
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+ REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+ }
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
@@ -6877,9 +6887,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
}
-#ifdef BCM_CNIC
- bnx2x_init_block(bp, BLOCK_SRC, init_phase);
-#endif
+ if (CNIC_SUPPORT(bp))
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+
bnx2x_init_block(bp, BLOCK_CDU, init_phase);
bnx2x_init_block(bp, BLOCK_CFC, init_phase);
@@ -7040,6 +7050,130 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
bnx2x_ilt_wr(bp, i, 0);
}
+
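+/* Set up the connection searcher: init the T2 table and the number of T1 hash bits for this port */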
+static void bnx2x_init_searcher(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+ /* T1 hash bits value determines the T1 number of entries */
+ REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+}
+
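+/* Send a FUNCTION_UPDATE (switch_update) ramrod that suspends or resumes Tx
+ * switching for this PF and wait for its completion.
+ */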
+static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
+{
+ int rc;
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_switch_update_params *switch_update_params =
+ &func_params.params.switch_update;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ /* Function parameters */
+ switch_update_params->suspend = suspend;
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+
+ return rc;
+}
+
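+/* Take the parser out of NIC mode so CNIC (L5) traffic can be handled.
+ * Network input and Tx switching are suspended around the NIC_MODE change
+ * so that no packet is parsed with an inconsistent configuration.
+ */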
+static int bnx2x_reset_nic_mode(struct bnx2x *bp)
+{
+ int rc, i, port = BP_PORT(bp);
+ int vlan_en = 0, mac_en[NUM_MACS];
+
+
+ /* Close input from network */
+ if (bp->mf_mode == SINGLE_FUNCTION) {
+ bnx2x_set_rx_filter(&bp->link_params, 0);
+ } else {
+ vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN);
+ REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN, 0);
+ for (i = 0; i < NUM_MACS; i++) {
+ mac_en[i] = REG_RD(bp, port ?
+ (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE +
+ 4 * i));
+ REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
+ }
+ }
+
+ /* Close BMC to host */
+ REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+ NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
+
+ /* Suspend Tx switching to the PF. Completion of this ramrod
+ * further guarantees that all the packets of that PF / child
+ * VFs in BRB were processed by the Parser, so it is safe to
+ * change the NIC_MODE register.
+ */
+ rc = bnx2x_func_switch_update(bp, 1);
+ if (rc) {
+ BNX2X_ERR("Can't suspend tx-switching!\n");
+ return rc;
+ }
+
+ /* Change NIC_MODE register */
+ REG_WR(bp, PRS_REG_NIC_MODE, 0);
+
+ /* Open input from network */
+ if (bp->mf_mode == SINGLE_FUNCTION) {
+ bnx2x_set_rx_filter(&bp->link_params, 1);
+ } else {
+ REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN, vlan_en);
+ for (i = 0; i < NUM_MACS; i++) {
+ REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
+ mac_en[i]);
+ }
+ }
+
+ /* Enable BMC to host */
+ REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+ NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
+
+ /* Resume Tx switching to the PF */
+ rc = bnx2x_func_switch_update(bp, 0);
+ if (rc) {
+ BNX2X_ERR("Can't resume tx-switching!\n");
+ return rc;
+ }
+
+ DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+ return 0;
+}
+
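+/* CNIC part of the per-function HW init: CNIC ILT clients and, when the
+ * function was started in NIC mode, the searcher setup and the NIC_MODE switch.
+ */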
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
+{
+ int rc;
+
+ bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
+
+ if (CONFIGURE_NIC_MODE(bp)) {
+		/* Configure searcher as part of function hw init */
+ bnx2x_init_searcher(bp);
+
+ /* Reset NIC mode */
+ rc = bnx2x_reset_nic_mode(bp);
+ if (rc)
+ BNX2X_ERR("Can't change NIC mode!\n");
+ return rc;
+ }
+
+ return 0;
+}
+
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -7082,17 +7216,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
}
bnx2x_ilt_init_op(bp, INITOP_SET);
-#ifdef BCM_CNIC
- bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
-
- /* T1 hash bits value determines the T1 number of entries */
- REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
-#endif
+ if (!CONFIGURE_NIC_MODE(bp)) {
+ bnx2x_init_searcher(bp);
+ REG_WR(bp, PRS_REG_NIC_MODE, 0);
+ DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+ } else {
+ /* Set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+ DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
-#ifndef BCM_CNIC
- /* set NIC mode */
- REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif /* BCM_CNIC */
+ }
if (!CHIP_IS_E1x(bp)) {
u32 pf_conf = IGU_PF_CONF_FUNC_EN;
@@ -7343,6 +7476,20 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
}
+void bnx2x_free_mem_cnic(struct bnx2x *bp)
+{
+ bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
+
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+}
+
void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
@@ -7367,17 +7514,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_FREE(bp->ilt->lines);
-#ifdef BCM_CNIC
- if (!CHIP_IS_E1x(bp))
- BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e1x));
-
- BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
-#endif
-
BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
@@ -7445,24 +7581,44 @@ alloc_mem_err:
return -ENOMEM;
}
-
-int bnx2x_alloc_mem(struct bnx2x *bp)
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
- int i, allocated, context_size;
-
-#ifdef BCM_CNIC
if (!CHIP_IS_E1x(bp))
/* size = the status block + ramrod buffers */
BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
sizeof(struct host_hc_status_block_e2));
else
- BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e1x));
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
+ &bp->cnic_sb_mapping,
+ sizeof(struct
+ host_hc_status_block_e1x));
- /* allocate searcher T2 table */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
-#endif
+ if (CONFIGURE_NIC_MODE(bp))
+ /* allocate searcher T2 table, as it wasn't allocated before */
+ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+
+ /* write address to which L5 should insert its values */
+ bp->cnic_eth_dev.addr_drv_info_to_mcp =
+ &bp->slowpath->drv_info_to_mcp;
+
+ if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
+ goto alloc_mem_err;
+
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_mem_cnic(bp);
+ BNX2X_ERR("Can't allocate memory\n");
+ return -ENOMEM;
+}
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+ int i, allocated, context_size;
+ if (!CONFIGURE_NIC_MODE(bp))
+ /* allocate searcher T2 table */
+ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
@@ -7470,11 +7626,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
-#ifdef BCM_CNIC
- /* write address to which L5 should insert its values */
- bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
-#endif
-
/* Allocated memory for FW statistics */
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
@@ -7596,14 +7747,12 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
unsigned long ramrod_flags = 0;
-#ifdef BCM_CNIC
if (is_zero_ether_addr(bp->dev->dev_addr) &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
"Ignoring Zero MAC for STORAGE SD mode\n");
return 0;
}
-#endif
DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
@@ -7632,7 +7781,8 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
bnx2x_enable_msi(bp);
/* falling through... */
case INT_MODE_INTx:
- bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+ bp->num_ethernet_queues = 1;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
@@ -7644,9 +7794,10 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
bp->flags & USING_SINGLE_MSIX_FLAG) {
/* failed to enable multiple MSI-X */
BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
- bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
+ bp->num_queues,
+ 1 + bp->num_cnic_queues);
- bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+ bp->num_queues = 1 + bp->num_cnic_queues;
/* Try to enable MSI */
if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
@@ -7679,9 +7830,9 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
ilt_client->flags = ILT_CLIENT_SKIP_MEM;
ilt_client->start = line;
line += bnx2x_cid_ilt_lines(bp);
-#ifdef BCM_CNIC
- line += CNIC_ILT_LINES;
-#endif
+
+ if (CNIC_SUPPORT(bp))
+ line += CNIC_ILT_LINES;
ilt_client->end = line - 1;
DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
@@ -7714,49 +7865,43 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
ilog2(ilt_client->page_size >> 12));
}
- /* SRC */
- ilt_client = &ilt->clients[ILT_CLIENT_SRC];
-#ifdef BCM_CNIC
- ilt_client->client_num = ILT_CLIENT_SRC;
- ilt_client->page_size = SRC_ILT_PAGE_SZ;
- ilt_client->flags = 0;
- ilt_client->start = line;
- line += SRC_ILT_LINES;
- ilt_client->end = line - 1;
- DP(NETIF_MSG_IFUP,
- "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
- ilt_client->start,
- ilt_client->end,
- ilt_client->page_size,
- ilt_client->flags,
- ilog2(ilt_client->page_size >> 12));
+ if (CNIC_SUPPORT(bp)) {
+ /* SRC */
+ ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+ ilt_client->client_num = ILT_CLIENT_SRC;
+ ilt_client->page_size = SRC_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += SRC_ILT_LINES;
+ ilt_client->end = line - 1;
-#else
- ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
-#endif
+ DP(NETIF_MSG_IFUP,
+ "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
- /* TM */
- ilt_client = &ilt->clients[ILT_CLIENT_TM];
-#ifdef BCM_CNIC
- ilt_client->client_num = ILT_CLIENT_TM;
- ilt_client->page_size = TM_ILT_PAGE_SZ;
- ilt_client->flags = 0;
- ilt_client->start = line;
- line += TM_ILT_LINES;
- ilt_client->end = line - 1;
+ /* TM */
+ ilt_client = &ilt->clients[ILT_CLIENT_TM];
+ ilt_client->client_num = ILT_CLIENT_TM;
+ ilt_client->page_size = TM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += TM_ILT_LINES;
+ ilt_client->end = line - 1;
- DP(NETIF_MSG_IFUP,
- "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
- ilt_client->start,
- ilt_client->end,
- ilt_client->page_size,
- ilt_client->flags,
- ilog2(ilt_client->page_size >> 12));
+ DP(NETIF_MSG_IFUP,
+ "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+ }
-#else
- ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
-#endif
BUG_ON(line > ILT_MAX_LINES);
}
@@ -7823,7 +7968,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
}
}
-int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_queue_state_params *q_params,
struct bnx2x_queue_setup_tx_only_params *tx_only_params,
int tx_index, bool leading)
@@ -7924,6 +8069,9 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Set the command */
q_params.cmd = BNX2X_Q_CMD_SETUP;
+ if (IS_FCOE_FP(fp))
+ bp->fcoe_init = true;
+
/* Change the state to SETUP */
rc = bnx2x_queue_state_change(bp, &q_params);
if (rc) {
@@ -8037,12 +8185,12 @@ static void bnx2x_reset_func(struct bnx2x *bp)
SB_DISABLED);
}
-#ifdef BCM_CNIC
- /* CNIC SB */
- REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
- SB_DISABLED);
-#endif
+ if (CNIC_LOADED(bp))
+ /* CNIC SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
+ (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
+
/* SP SB */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
@@ -8061,19 +8209,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
}
-#ifdef BCM_CNIC
- /* Disable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
- /*
- * Wait for at least 10ms and up to 2 second for the timers scan to
- * complete
- */
- for (i = 0; i < 200; i++) {
- msleep(10);
- if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
- break;
+ if (CNIC_LOADED(bp)) {
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+ /*
+ * Wait for at least 10ms and up to 2 seconds for the timers
+ * scan to complete
+ */
+ for (i = 0; i < 200; i++) {
+ msleep(10);
+ if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+ break;
+ }
}
-#endif
/* Clear ILT */
bnx2x_clear_func_ilt(bp, func);
@@ -8409,13 +8557,24 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
/* Close multi and leading connections
* Completions for ramrods are collected in a synchronous way
*/
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
if (bnx2x_stop_queue(bp, i))
#ifdef BNX2X_STOP_ON_ERROR
return;
#else
goto unload_error;
#endif
+
+ if (CNIC_LOADED(bp)) {
+ for_each_cnic_queue(bp, i)
+ if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
+ goto unload_error;
+#endif
+ }
+
/* If SP settings didn't get completed so far - something
* very wrong has happen.
*/
@@ -8437,6 +8596,8 @@ unload_error:
bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
@@ -8850,7 +9011,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
return 0;
}
-int bnx2x_leader_reset(struct bnx2x *bp)
+static int bnx2x_leader_reset(struct bnx2x *bp)
{
int rc = 0;
bool global = bnx2x_reset_is_global(bp);
@@ -10224,12 +10385,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
u32 no_flags = NO_ISCSI_FLAG;
-#ifdef BCM_CNIC
int port = BP_PORT(bp);
-
u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_iscsi_conn);
+ if (!CNIC_SUPPORT(bp)) {
+ bp->flags |= no_flags;
+ return;
+ }
+
/* Get the number of maximum allowed iSCSI connections */
bp->cnic_eth_dev.max_iscsi_conn =
(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
@@ -10244,12 +10408,9 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
*/
if (!bp->cnic_eth_dev.max_iscsi_conn)
bp->flags |= no_flags;
-#else
- bp->flags |= no_flags;
-#endif
}
-#ifdef BCM_CNIC
static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
{
/* Port info */
@@ -10264,16 +10425,18 @@ static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
}
-#endif
static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
{
-#ifdef BCM_CNIC
int port = BP_PORT(bp);
int func = BP_ABS_FUNC(bp);
-
u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_fcoe_conn);
+ if (!CNIC_SUPPORT(bp)) {
+ bp->flags |= NO_FCOE_FLAG;
+ return;
+ }
+
/* Get the number of maximum allowed FCoE connections */
bp->cnic_eth_dev.max_fcoe_conn =
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
@@ -10319,9 +10482,6 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
*/
if (!bp->cnic_eth_dev.max_fcoe_conn)
bp->flags |= NO_FCOE_FLAG;
-#else
- bp->flags |= NO_FCOE_FLAG;
-#endif
}
static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -10335,132 +10495,133 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
bnx2x_get_fcoe_info(bp);
}
-static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+static void __devinit bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
{
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
-#ifdef BCM_CNIC
u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
u8 *fip_mac = bp->fip_mac;
-#endif
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
- if (BP_NOMCP(bp)) {
- BNX2X_ERROR("warning: random MAC workaround active\n");
- eth_hw_addr_random(bp->dev);
- } else if (IS_MF(bp)) {
- val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
- val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
- if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-
-#ifdef BCM_CNIC
- /*
- * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+ if (IS_MF(bp)) {
+ /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor a
* FCoE MAC then the appropriate feature should be disabled.
- *
- * In non SD mode features configuration comes from
- * struct func_ext_config.
+ * In non SD mode features configuration comes from struct
+ * func_ext_config.
*/
- if (!IS_MF_SD(bp)) {
+ if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
- iscsi_mac_addr_upper);
+ iscsi_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
- iscsi_mac_addr_lower);
+ iscsi_mac_addr_lower);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
- BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
- iscsi_mac);
- } else
+ BNX2X_DEV_INFO
+ ("Read iSCSI MAC: %pM\n", iscsi_mac);
+ } else {
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+ }
if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
- fcoe_mac_addr_upper);
+ fcoe_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
- fcoe_mac_addr_lower);
+ fcoe_mac_addr_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
- BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
- fip_mac);
-
- } else
+ BNX2X_DEV_INFO
+ ("Read FCoE L2 MAC: %pM\n", fip_mac);
+ } else {
bp->flags |= NO_FCOE_FLAG;
+ }
bp->mf_ext_config = cfg;
} else { /* SD MODE */
- if (IS_MF_STORAGE_SD(bp)) {
- if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
- /* use primary mac as iscsi mac */
- memcpy(iscsi_mac, bp->dev->dev_addr,
- ETH_ALEN);
-
- BNX2X_DEV_INFO("SD ISCSI MODE\n");
- BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
- iscsi_mac);
- } else { /* FCoE */
- memcpy(fip_mac, bp->dev->dev_addr,
- ETH_ALEN);
- BNX2X_DEV_INFO("SD FCoE MODE\n");
- BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
- fip_mac);
- }
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
+ if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO
+ ("Read iSCSI MAC: %pM\n", iscsi_mac);
+ } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+ /* use primary mac as fip mac */
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+ BNX2X_DEV_INFO("SD FCoE MODE\n");
+ BNX2X_DEV_INFO
+ ("Read FIP MAC: %pM\n", fip_mac);
}
}
+ if (IS_MF_STORAGE_SD(bp))
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
if (IS_MF_FCOE_AFEX(bp))
/* use FIP MAC as primary MAC */
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-#endif
} else {
- /* in SF read MACs from port configuration */
- val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
- val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-
-#ifdef BCM_CNIC
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
- iscsi_mac_upper);
+ iscsi_mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
- iscsi_mac_lower);
+ iscsi_mac_lower);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
- fcoe_fip_mac_upper);
+ fcoe_fip_mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
- fcoe_fip_mac_lower);
+ fcoe_fip_mac_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
-#endif
}
- memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
-
-#ifdef BCM_CNIC
- /* Disable iSCSI if MAC configuration is
- * invalid.
- */
+ /* Disable iSCSI and iSCSI OOO if MAC configuration is invalid. */
if (!is_valid_ether_addr(iscsi_mac)) {
- bp->flags |= NO_ISCSI_FLAG;
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
memset(iscsi_mac, 0, ETH_ALEN);
}
- /* Disable FCoE if MAC configuration is
- * invalid.
- */
+ /* Disable FCoE if MAC configuration is invalid. */
if (!is_valid_ether_addr(fip_mac)) {
bp->flags |= NO_FCOE_FLAG;
memset(bp->fip_mac, 0, ETH_ALEN);
}
-#endif
+}
+
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERROR("warning: random MAC workaround active\n");
+ eth_hw_addr_random(bp->dev);
+ } else if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+ if (CNIC_SUPPORT(bp))
+ bnx2x_get_cnic_mac_hwinfo(bp);
+ } else {
+ /* in SF read MACs from port configuration */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+ if (CNIC_SUPPORT(bp))
+ bnx2x_get_cnic_mac_hwinfo(bp);
+ }
+
+ memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
@@ -10837,9 +10998,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
spin_lock_init(&bp->stats_lock);
-#ifdef BCM_CNIC
- mutex_init(&bp->cnic_mutex);
-#endif
+
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -10877,10 +11036,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
bp->disable_tpa = disable_tpa;
-
-#ifdef BCM_CNIC
bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
-#endif
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -10914,12 +11070,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
bnx2x_dcbx_init_params(bp);
-#ifdef BCM_CNIC
if (CHIP_IS_E1x(bp))
bp->cnic_base_cl_id = FP_SB_MAX_E1x;
else
bp->cnic_base_cl_id = FP_SB_MAX_E2;
-#endif
/* multiple tx priority */
if (CHIP_IS_E1x(bp))
@@ -10929,6 +11083,16 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ /* We need at least one default status block for slow-path events,
+ * a second status block for the L2 queue, and a third status block
+ * for CNIC if supported.
+ */
+ if (CNIC_SUPPORT(bp))
+ bp->min_msix_vec_cnt = 3;
+ else
+ bp->min_msix_vec_cnt = 2;
+ BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
+
return rc;
}
@@ -11165,11 +11329,9 @@ void bnx2x_set_rx_mode(struct net_device *dev)
}
bp->rx_mode = rx_mode;
-#ifdef BCM_CNIC
/* handle ISCSI SD mode */
if (IS_MF_ISCSI_SD(bp))
bp->rx_mode = BNX2X_RX_MODE_NONE;
-#endif
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -11281,7 +11443,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#endif
.ndo_setup_tc = bnx2x_setup_tc,
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
};
@@ -11747,9 +11909,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
int cid_count = BNX2X_L2_MAX_CID(bp);
-#ifdef BCM_CNIC
- cid_count += CNIC_CID_MAX;
-#endif
+ if (CNIC_SUPPORT(bp))
+ cid_count += CNIC_CID_MAX;
return roundup(cid_count, QM_CID_ROUND);
}
@@ -11759,7 +11920,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
* @dev: pci device
*
*/
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
+ int cnic_cnt)
{
int pos;
u16 control;
@@ -11771,7 +11933,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
* one fast path queue: one FP queue + SB for CNIC
*/
if (!pos)
- return 1 + CNIC_PRESENT;
+ return 1 + cnic_cnt;
/*
* The value in the PCI configuration space is the index of the last
@@ -11791,6 +11953,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
int pcie_width, pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
+ int cnic_cnt;
/*
* An estimated maximum supported CoS number according to the chip
* version.
@@ -11834,21 +11997,22 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return -ENODEV;
}
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+ cnic_cnt = 1;
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
WARN_ON(!max_non_def_sbs);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = max_non_def_sbs - CNIC_PRESENT;
+ rss_count = max_non_def_sbs - cnic_cnt;
/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
- rx_count = rss_count + FCOE_PRESENT;
+ rx_count = rss_count + cnic_cnt;
/*
* Maximum number of netdev Tx queues:
* Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
- tx_count = rss_count * max_cos_est + FCOE_PRESENT;
+ tx_count = rss_count * max_cos_est + cnic_cnt;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11859,6 +12023,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp->igu_sb_cnt = max_non_def_sbs;
bp->msg_enable = debug;
+ bp->cnic_support = cnic_cnt;
+
pci_set_drvdata(pdev, dev);
rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
@@ -11867,6 +12033,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return rc;
}
+ BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
@@ -11899,10 +12066,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
-#ifdef BCM_CNIC
- /* disable FCOE L2 queue for E1x */
+ /* disable FCOE L2 queue for E1x */
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
+
/* disable FCOE for 57840 device, until FW supports it */
switch (ent->driver_data) {
case BCM57840_O:
@@ -11912,8 +12079,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
case BCM57840_MF:
bp->flags |= NO_FCOE_FLAG;
}
-#endif
-
/* Set bp->num_queues for MSI-X mode*/
bnx2x_set_num_queues(bp);
@@ -11929,14 +12094,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
-#ifdef BCM_CNIC
+
if (!NO_FCOE(bp)) {
/* Add storage MAC address */
rtnl_lock();
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-#endif
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
@@ -11981,14 +12145,12 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
}
bp = netdev_priv(dev);
-#ifdef BCM_CNIC
/* Delete storage MAC address */
if (!NO_FCOE(bp)) {
rtnl_lock();
dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-#endif
#ifdef BCM_DCBNL
/* Delete app tlvs from dcbnl */
@@ -12036,15 +12198,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->rx_mode = BNX2X_RX_MODE_NONE;
-#ifdef BCM_CNIC
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
+ if (CNIC_LOADED(bp))
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
/* Stop Tx */
bnx2x_tx_disable(bp);
bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
del_timer_sync(&bp->timer);
@@ -12235,7 +12399,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp)
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
-#ifdef BCM_CNIC
/**
* bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
*
@@ -12688,12 +12851,31 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
{
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ int rc;
+
+ DP(NETIF_MSG_IFUP, "Register_cnic called\n");
if (ops == NULL) {
BNX2X_ERR("NULL ops received\n");
return -EINVAL;
}
+ if (!CNIC_SUPPORT(bp)) {
+ BNX2X_ERR("Can't register CNIC when not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!CNIC_LOADED(bp)) {
+ rc = bnx2x_load_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("CNIC-related load failed\n");
+ return rc;
+ }
+ }
+
+ bp->cnic_enabled = true;
+
bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!bp->cnic_kwq)
return -ENOMEM;
@@ -12785,5 +12967,4 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
-#endif /* BCM_CNIC */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 1b1999d..7d93adb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2107,6 +2107,7 @@
#define NIG_REG_LLH1_ERROR_MASK 0x10090
/* [RW 8] event id for llh1 */
#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_EN 0x16104
#define NIG_REG_LLH1_FUNC_MEM 0x161c0
#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
@@ -2302,6 +2303,15 @@
* set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
* accommodate the 9 input clients to ETS arbiter. */
#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter
+ * for BRB LB interface is bypassed and PBF LB traffic is always selected to
+ * send to BRB LB.
+ */
+#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4
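/* Illustrative use, based on bnx2x_reset_nic_mode() introduced earlier in
 * this patch: the driver closes the MCP-to-host path for the current port
 * before changing PRS_REG_NIC_MODE and re-opens it afterwards, roughly:
 *
 *	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
 *	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
 *	REG_WR(bp, PRS_REG_NIC_MODE, 0);
 *	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
 *	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
 */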
#define NIG_REG_P1_HWPFC_ENABLE 0x181d0
#define NIG_REG_P1_MAC_IN_EN 0x185c0
/* [RW 1] Output enable for TX MAC interface */
@@ -2418,6 +2428,12 @@
#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
/* [R 1] TX FIFO for transmitting data to MAC is empty. */
#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ */
+#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8
/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
forwarded to the host. */
#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 614981c..b8b4b74 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5350,12 +5350,24 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
(!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
next_state = BNX2X_F_STATE_STARTED;
+
+ /* Switch_update ramrod can be sent in either started or
+ * tx_stopped state, and it doesn't change the state.
+ */
+ else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_STARTED;
+
else if (cmd == BNX2X_F_CMD_TX_STOP)
next_state = BNX2X_F_STATE_TX_STOPPED;
break;
case BNX2X_F_STATE_TX_STOPPED:
- if (cmd == BNX2X_F_CMD_TX_START)
+ if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
+ else if (cmd == BNX2X_F_CMD_TX_START)
next_state = BNX2X_F_STATE_STARTED;
break;
@@ -5637,6 +5649,28 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
+static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct function_update_data *rdata =
+ (struct function_update_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_switch_update_params *switch_update_params =
+ &params->params.switch_update;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->tx_switch_suspend_change_flg = 1;
+ rdata->tx_switch_suspend = switch_update_params->suspend;
+ rdata->echo = SWITCH_UPDATE;
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
struct bnx2x_func_state_params *params)
{
@@ -5657,6 +5691,7 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
cpu_to_le16(afex_update_params->afex_default_vlan);
rdata->allowed_priorities_change_flg = 1;
rdata->allowed_priorities = afex_update_params->allowed_priorities;
+ rdata->echo = AFEX_UPDATE;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
@@ -5773,6 +5808,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
return bnx2x_func_send_tx_stop(bp, params);
case BNX2X_F_CMD_TX_START:
return bnx2x_func_send_tx_start(bp, params);
+ case BNX2X_F_CMD_SWITCH_UPDATE:
+ return bnx2x_func_send_switch_update(bp, params);
default:
BNX2X_ERR("Unknown command: %d\n", params->cmd);
return -EINVAL;
@@ -5818,16 +5855,30 @@ int bnx2x_func_state_change(struct bnx2x *bp,
struct bnx2x_func_state_params *params)
{
struct bnx2x_func_sp_obj *o = params->f_obj;
- int rc;
+ int rc, cnt = 300;
enum bnx2x_func_cmd cmd = params->cmd;
unsigned long *pending = &o->pending;
mutex_lock(&o->one_pending_mutex);
/* Check that the requested transition is legal */
- if (o->check_transition(bp, o, params)) {
+ rc = o->check_transition(bp, o, params);
+ if ((rc == -EBUSY) &&
+ (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
+ while ((rc == -EBUSY) && (--cnt > 0)) {
+ mutex_unlock(&o->one_pending_mutex);
+ msleep(10);
+ mutex_lock(&o->one_pending_mutex);
+ rc = o->check_transition(bp, o, params);
+ }
+ if (rc == -EBUSY) {
+ mutex_unlock(&o->one_pending_mutex);
+ BNX2X_ERR("timeout waiting for previous ramrod completion\n");
+ return rc;
+ }
+ } else if (rc) {
mutex_unlock(&o->one_pending_mutex);
- return -EINVAL;
+ return rc;
}
/* Set "pending" bit */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index acf2fe4..adbd91b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -40,6 +40,12 @@ enum {
* pending commands list.
*/
RAMROD_CONT,
+ /* If there is another pending ramrod, wait until it finishes and
+ * re-try to submit this one. This flag can be set only in sleepable
+ * context, and should not be set from the context that completes the
+ * ramrods as deadlock will occur.
+ */
+ RAMROD_RETRY,
};
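/* A minimal RAMROD_RETRY usage sketch, mirroring bnx2x_func_switch_update()
 * added in this patch; valid from sleepable context only:
 *
 *	struct bnx2x_func_state_params p = {NULL};
 *
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	__set_bit(RAMROD_RETRY, &p.ramrod_flags);
 *	p.f_obj = &bp->func_obj;
 *	p.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 *	rc = bnx2x_func_state_change(bp, &p);
 *
 * bnx2x_func_state_change() may then msleep() and re-check the requested
 * transition while another ramrod is still pending.
 */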
typedef enum {
@@ -1061,6 +1067,7 @@ enum bnx2x_func_cmd {
BNX2X_F_CMD_AFEX_VIFLISTS,
BNX2X_F_CMD_TX_STOP,
BNX2X_F_CMD_TX_START,
+ BNX2X_F_CMD_SWITCH_UPDATE,
BNX2X_F_CMD_MAX,
};
@@ -1103,6 +1110,10 @@ struct bnx2x_func_start_params {
u8 network_cos_mode;
};
+struct bnx2x_func_switch_update_params {
+ u8 suspend;
+};
+
struct bnx2x_func_afex_update_params {
u16 vif_id;
u16 afex_default_vlan;
@@ -1136,6 +1147,7 @@ struct bnx2x_func_state_params {
struct bnx2x_func_hw_init_params hw_init;
struct bnx2x_func_hw_reset_params hw_reset;
struct bnx2x_func_start_params start;
+ struct bnx2x_func_switch_update_params switch_update;
struct bnx2x_func_afex_update_params afex_update;
struct bnx2x_func_afex_viflists_params afex_viflists;
struct bnx2x_func_tx_start_params tx_start;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a8800ac..5cc976d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -90,10 +90,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 125
+#define TG3_MIN_NUM 127
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "September 26, 2012"
+#define DRV_MODULE_RELDATE "November 14, 2012"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -226,6 +226,9 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
+#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
+
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
@@ -245,20 +248,28 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+ TG3_DRV_DATA_FLAG_5705_10_100},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+ TG3_DRV_DATA_FLAG_5705_10_100},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+ TG3_DRV_DATA_FLAG_5705_10_100},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
@@ -266,8 +277,13 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
+ PCI_VENDOR_ID_LENOVO,
+ TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
@@ -286,18 +302,28 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
+ PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
+ PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
@@ -398,19 +424,27 @@ static const struct {
};
#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
+#define TG3_NVRAM_TEST 0
+#define TG3_LINK_TEST 1
+#define TG3_REGISTER_TEST 2
+#define TG3_MEMORY_TEST 3
+#define TG3_MAC_LOOPB_TEST 4
+#define TG3_PHY_LOOPB_TEST 5
+#define TG3_EXT_LOOPB_TEST 6
+#define TG3_INTERRUPT_TEST 7
static const struct {
const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
- { "nvram test (online) " },
- { "link test (online) " },
- { "register test (offline)" },
- { "memory test (offline)" },
- { "mac loopback test (offline)" },
- { "phy loopback test (offline)" },
- { "ext loopback test (offline)" },
- { "interrupt test (offline)" },
+ [TG3_NVRAM_TEST] = { "nvram test (online) " },
+ [TG3_LINK_TEST] = { "link test (online) " },
+ [TG3_REGISTER_TEST] = { "register test (offline)" },
+ [TG3_MEMORY_TEST] = { "memory test (offline)" },
+ [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
+ [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
+ [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
+ [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
};
#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
@@ -2447,6 +2481,18 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
return err;
}
+static void tg3_carrier_on(struct tg3 *tp)
+{
+ netif_carrier_on(tp->dev);
+ tp->link_up = true;
+}
+
+static void tg3_carrier_off(struct tg3 *tp)
+{
+ netif_carrier_off(tp->dev);
+ tp->link_up = false;
+}
+
/* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero.
*/
@@ -2465,8 +2511,8 @@ static int tg3_phy_reset(struct tg3 *tp)
if (err != 0)
return -EBUSY;
- if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
- netif_carrier_off(tp->dev);
+ if (netif_running(tp->dev) && tp->link_up) {
+ tg3_carrier_off(tp);
tg3_link_report(tp);
}
@@ -4160,6 +4206,24 @@ static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
return true;
}
+static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
+{
+ if (curr_link_up != tp->link_up) {
+ if (curr_link_up) {
+ tg3_carrier_on(tp);
+ } else {
+ tg3_carrier_off(tp);
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ }
+
+ tg3_link_report(tp);
+ return true;
+ }
+
+ return false;
+}
+
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
int current_link_up;
@@ -4192,7 +4256,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
- netif_carrier_ok(tp->dev)) {
+ tp->link_up) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
!(bmsr & BMSR_LSTATUS))
@@ -4434,13 +4498,7 @@ relink:
PCI_EXP_LNKCTL_CLKREQ_EN);
}
- if (current_link_up != netif_carrier_ok(tp->dev)) {
- if (current_link_up)
- netif_carrier_on(tp->dev);
- else
- netif_carrier_off(tp->dev);
- tg3_link_report(tp);
- }
+ tg3_test_and_report_link_chg(tp, current_link_up);
return 0;
}
@@ -5080,7 +5138,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
orig_active_duplex = tp->link_config.active_duplex;
if (!tg3_flag(tp, HW_AUTONEG) &&
- netif_carrier_ok(tp->dev) &&
+ tp->link_up &&
tg3_flag(tp, INIT_COMPLETE)) {
mac_status = tr32(MAC_STATUS);
mac_status &= (MAC_STATUS_PCS_SYNCED |
@@ -5158,13 +5216,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
LED_CTRL_TRAFFIC_OVERRIDE));
}
- if (current_link_up != netif_carrier_ok(tp->dev)) {
- if (current_link_up)
- netif_carrier_on(tp->dev);
- else
- netif_carrier_off(tp->dev);
- tg3_link_report(tp);
- } else {
+ if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
u32 now_pause_cfg = tp->link_config.active_flowctrl;
if (orig_pause_cfg != now_pause_cfg ||
orig_active_speed != tp->link_config.active_speed ||
@@ -5257,7 +5309,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
new_bmcr |= BMCR_SPEED1000;
/* Force a linkdown */
- if (netif_carrier_ok(tp->dev)) {
+ if (tp->link_up) {
u32 adv;
err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
@@ -5269,7 +5321,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
BMCR_ANRESTART |
BMCR_ANENABLE);
udelay(10);
- netif_carrier_off(tp->dev);
+ tg3_carrier_off(tp);
}
tg3_writephy(tp, MII_BMCR, new_bmcr);
bmcr = new_bmcr;
@@ -5335,15 +5387,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
tp->link_config.active_speed = current_speed;
tp->link_config.active_duplex = current_duplex;
- if (current_link_up != netif_carrier_ok(tp->dev)) {
- if (current_link_up)
- netif_carrier_on(tp->dev);
- else {
- netif_carrier_off(tp->dev);
- tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
- }
- tg3_link_report(tp);
- }
+ tg3_test_and_report_link_chg(tp, current_link_up);
return err;
}
@@ -5355,7 +5399,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
return;
}
- if (!netif_carrier_ok(tp->dev) &&
+ if (!tp->link_up &&
(tp->link_config.autoneg == AUTONEG_ENABLE)) {
u32 bmcr;
@@ -5385,7 +5429,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
}
}
- } else if (netif_carrier_ok(tp->dev) &&
+ } else if (tp->link_up &&
(tp->link_config.autoneg == AUTONEG_ENABLE) &&
(tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
u32 phy2;
@@ -5451,7 +5495,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
(32 << TX_LENGTHS_SLOT_TIME_SHIFT));
if (!tg3_flag(tp, 5705_PLUS)) {
- if (netif_carrier_ok(tp->dev)) {
+ if (tp->link_up) {
tw32(HOSTCC_STAT_COAL_TICKS,
tp->coal.stats_block_coalesce_usecs);
} else {
@@ -5461,7 +5505,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
if (tg3_flag(tp, ASPM_WORKAROUND)) {
val = tr32(PCIE_PWR_MGMT_THRESH);
- if (!netif_carrier_ok(tp->dev))
+ if (!tp->link_up)
val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
tp->pwrmgmt_thresh;
else
@@ -6477,6 +6521,7 @@ static inline void tg3_netif_stop(struct tg3 *tp)
{
tp->dev->trans_start = jiffies; /* prevent tx timeout */
tg3_napi_disable(tp);
+ netif_carrier_off(tp->dev);
netif_tx_disable(tp->dev);
}
@@ -6488,6 +6533,9 @@ static inline void tg3_netif_start(struct tg3 *tp)
*/
netif_tx_wake_all_queues(tp->dev);
+ if (tp->link_up)
+ netif_carrier_on(tp->dev);
+
tg3_napi_enable(tp);
tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
@@ -8386,7 +8434,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
- if (!netif_carrier_ok(tp->dev))
+ if (!tp->link_up)
val = 0;
tw32(HOSTCC_STAT_COAL_TICKS, val);
@@ -8662,14 +8710,14 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
if (!tg3_flag(tp, SUPPORT_MSIX))
return;
- if (tp->irq_cnt <= 2) {
+ if (tp->rxq_cnt == 1) {
memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
return;
}
/* Validate table against current IRQ count */
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
- if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+ if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
break;
}
@@ -9679,7 +9727,7 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
struct tg3_hw_stats *sp = tp->hw_stats;
- if (!netif_carrier_ok(tp->dev))
+ if (!tp->link_up)
return;
TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
@@ -9823,11 +9871,11 @@ static void tg3_timer(unsigned long __opaque)
u32 mac_stat = tr32(MAC_STATUS);
int need_setup = 0;
- if (netif_carrier_ok(tp->dev) &&
+ if (tp->link_up &&
(mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
need_setup = 1;
}
- if (!netif_carrier_ok(tp->dev) &&
+ if (!tp->link_up &&
(mac_stat & (MAC_STATUS_PCS_SYNCED |
MAC_STATUS_SIGNAL_DET))) {
need_setup = 1;
@@ -10429,10 +10477,8 @@ static void tg3_stop(struct tg3 *tp)
{
int i;
- tg3_napi_disable(tp);
tg3_reset_task_cancel(tp);
-
- netif_tx_disable(tp->dev);
+ tg3_netif_stop(tp);
tg3_timer_stop(tp);
@@ -10481,7 +10527,7 @@ static int tg3_open(struct net_device *dev)
}
}
- netif_carrier_off(tp->dev);
+ tg3_carrier_off(tp);
err = tg3_power_up(tp);
if (err)
@@ -10514,7 +10560,7 @@ static int tg3_close(struct net_device *dev)
tg3_power_down(tp);
- netif_carrier_off(tp->dev);
+ tg3_carrier_off(tp);
return 0;
}
@@ -10888,7 +10934,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising |= ADVERTISED_Asym_Pause;
}
}
- if (netif_running(dev) && netif_carrier_ok(dev)) {
+ if (netif_running(dev) && tp->link_up) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
cmd->lp_advertising = tp->link_config.rmt_adv;
@@ -11406,7 +11452,7 @@ static int tg3_set_channels(struct net_device *dev,
tg3_stop(tp);
- netif_carrier_off(dev);
+ tg3_carrier_off(tp);
tg3_start(tp, true, false);
@@ -11755,7 +11801,7 @@ static int tg3_test_link(struct tg3 *tp)
max = TG3_COPPER_TIMEOUT_SEC;
for (i = 0; i < max; i++) {
- if (netif_carrier_ok(tp->dev))
+ if (tp->link_up)
return 0;
if (msleep_interruptible(1000))
@@ -12326,19 +12372,19 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
if (!netif_running(tp->dev)) {
- data[0] = TG3_LOOPBACK_FAILED;
- data[1] = TG3_LOOPBACK_FAILED;
+ data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
if (do_extlpbk)
- data[2] = TG3_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
goto done;
}
err = tg3_reset_hw(tp, 1);
if (err) {
- data[0] = TG3_LOOPBACK_FAILED;
- data[1] = TG3_LOOPBACK_FAILED;
+ data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
if (do_extlpbk)
- data[2] = TG3_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
goto done;
}
@@ -12361,11 +12407,11 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
tg3_mac_loopback(tp, true);
if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
- data[0] |= TG3_STD_LOOPBACK_FAILED;
+ data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
- data[0] |= TG3_JMB_LOOPBACK_FAILED;
+ data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
tg3_mac_loopback(tp, false);
}
@@ -12384,13 +12430,13 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
}
if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
- data[1] |= TG3_STD_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
if (tg3_flag(tp, TSO_CAPABLE) &&
tg3_run_loopback(tp, ETH_FRAME_LEN, true))
- data[1] |= TG3_TSO_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
- data[1] |= TG3_JMB_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
if (do_extlpbk) {
tg3_phy_lpbk_set(tp, 0, true);
@@ -12402,13 +12448,16 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
mdelay(40);
if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
- data[2] |= TG3_STD_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] |=
+ TG3_STD_LOOPBACK_FAILED;
if (tg3_flag(tp, TSO_CAPABLE) &&
tg3_run_loopback(tp, ETH_FRAME_LEN, true))
- data[2] |= TG3_TSO_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] |=
+ TG3_TSO_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
- data[2] |= TG3_JMB_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] |=
+ TG3_JMB_LOOPBACK_FAILED;
}
/* Re-enable gphy autopowerdown. */
@@ -12416,7 +12465,8 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
tg3_phy_toggle_apd(tp, true);
}
- err = (data[0] | data[1] | data[2]) ? -EIO : 0;
+ err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
+ data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
done:
tp->phy_flags |= eee_cap;
@@ -12441,11 +12491,11 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
if (tg3_test_nvram(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[0] = 1;
+ data[TG3_NVRAM_TEST] = 1;
}
if (!doextlpbk && tg3_test_link(tp)) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[1] = 1;
+ data[TG3_LINK_TEST] = 1;
}
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int err, err2 = 0, irq_sync = 0;
@@ -12471,25 +12521,25 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
if (tg3_test_registers(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[2] = 1;
+ data[TG3_REGISTER_TEST] = 1;
}
if (tg3_test_memory(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[3] = 1;
+ data[TG3_MEMORY_TEST] = 1;
}
if (doextlpbk)
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
- if (tg3_test_loopback(tp, &data[4], doextlpbk))
+ if (tg3_test_loopback(tp, data, doextlpbk))
etest->flags |= ETH_TEST_FL_FAILED;
tg3_full_unlock(tp);
if (tg3_test_interrupt(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[7] = 1;
+ data[TG3_INTERRUPT_TEST] = 1;
}
tg3_full_lock(tp, 0);
@@ -14026,7 +14076,8 @@ out_not_found:
out_no_vpd:
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
- if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
strcpy(tp->board_part_number, "BCM5717");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
strcpy(tp->board_part_number, "BCM5718");
@@ -14397,6 +14448,7 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tg3_flag_set(tp, CPMU_PRESENT);
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
@@ -14424,6 +14476,9 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+ tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
@@ -14462,7 +14517,30 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tg3_flag_set(tp, 5705_PLUS);
}
-static int __devinit tg3_get_invariants(struct tg3 *tp)
+static bool tg3_10_100_only_device(struct tg3 *tp,
+ const struct pci_device_id *ent)
+{
+ u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
+
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ (tp->phy_flags & TG3_PHYFLG_IS_FET))
+ return true;
+
+ if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
+ return true;
+ } else {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int __devinit tg3_get_invariants(struct tg3 *tp,
+ const struct pci_device_id *ent)
{
u32 misc_ctrl_reg;
u32 pci_state_reg, grc_misc_cfg;
@@ -15141,22 +15219,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->mac_mode = 0;
- /* these are limited to 10/100 only */
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
- (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
- (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
- tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
- tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
- (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
- (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
- tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
- tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
- (tp->phy_flags & TG3_PHYFLG_IS_FET))
+ if (tg3_10_100_only_device(tp, ent))
tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
err = tg3_phy_probe(tp);
@@ -16013,6 +16076,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
@@ -16034,7 +16098,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
dev->netdev_ops = &tg3_netdev_ops;
dev->irq = pdev->irq;
- err = tg3_get_invariants(tp);
+ err = tg3_get_invariants(tp, ent);
if (err) {
dev_err(&pdev->dev,
"Problem fetching invariants of chip, aborting\n");
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d9308c32..4534804 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -44,12 +44,14 @@
#define TG3PCI_DEVICE_TIGON3_5761S 0x1688
#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689
#define TG3PCI_DEVICE_TIGON3_57780 0x1692
+#define TG3PCI_DEVICE_TIGON3_5787M 0x1693
#define TG3PCI_DEVICE_TIGON3_57760 0x1690
#define TG3PCI_DEVICE_TIGON3_57790 0x1694
#define TG3PCI_DEVICE_TIGON3_57788 0x1691
#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
#define TG3PCI_DEVICE_TIGON3_5717 0x1655
+#define TG3PCI_DEVICE_TIGON3_5717_C 0x1665
#define TG3PCI_DEVICE_TIGON3_5718 0x1656
#define TG3PCI_DEVICE_TIGON3_57781 0x16b1
#define TG3PCI_DEVICE_TIGON3_57785 0x16b5
@@ -95,6 +97,10 @@
#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099
#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM
#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281
+#define TG3PCI_SUBDEVICE_ID_ACER_57780_A 0x0601
+#define TG3PCI_SUBDEVICE_ID_ACER_57780_B 0x0612
+#define TG3PCI_SUBDEVICE_ID_LENOVO_5787M 0x3056
+
/* 0x30 --> 0x64 unused */
#define TG3PCI_MSI_DATA 0x00000064
/* 0x66 --> 0x68 unused */
@@ -149,6 +155,7 @@
#define CHIPREV_ID_57780_A0 0x57780000
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
+#define CHIPREV_ID_5717_C0 0x05717200
#define CHIPREV_ID_57765_A0 0x57785000
#define CHIPREV_ID_5719_A0 0x05719000
#define CHIPREV_ID_5720_A0 0x05720000
@@ -3262,6 +3269,7 @@ struct tg3 {
#if IS_ENABLED(CONFIG_HWMON)
struct device *hwmon_dev;
#endif
+ bool link_up;
};
#endif /* !(_T3_H) */
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index db93191..ceb0de0 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,13 +2,10 @@
# Atmel device configuration
#
-config HAVE_NET_MACB
- bool
-
config NET_CADENCE
bool "Cadence devices"
+ depends on HAS_IOMEM
default y
- depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
Make sure you know the name of your card. Read the Ethernet-HOWTO,
@@ -25,16 +22,14 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
- depends on ARM && ARCH_AT91RM9200
select NET_CORE
- select MII
+ select MACB
---help---
If you wish to compile a kernel for the AT91RM9200 and enable
ethernet support, then you should always answer Y to this.
config MACB
tristate "Cadence MACB/GEM support"
- depends on HAVE_NET_MACB
select PHYLIB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 4e980a7..e7a476c 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -6,11 +6,6 @@
* Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
* Initial version by Rick Bronson 01/11/2003
*
- * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
- * (Polaroid Corporation)
- *
- * Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -20,7 +15,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -31,956 +25,251 @@
#include <linux/clk.h>
#include <linux/gfp.h>
#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/mach-types.h>
-
-#include <mach/at91rm9200_emac.h>
-#include <asm/gpio.h>
-#include <mach/board.h>
-
-#include "at91_ether.h"
-
-#define DRV_NAME "at91_ether"
-#define DRV_VERSION "1.0"
-
-#define LINK_POLL_INTERVAL (HZ)
-
-/* ..................................................................... */
-
-/*
- * Read from a EMAC register.
- */
-static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg)
-{
- return __raw_readl(lp->emac_base + reg);
-}
-
-/*
- * Write to a EMAC register.
- */
-static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value)
-{
- __raw_writel(value, lp->emac_base + reg);
-}
-
-/* ........................... PHY INTERFACE ........................... */
-
-/*
- * Enable the MDIO bit in MAC control register
- * When not called from an interrupt-handler, access to the PHY must be
- * protected by a spinlock.
- */
-static void enable_mdi(struct at91_private *lp)
-{
- unsigned long ctl;
-
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
-}
-
-/*
- * Disable the MDIO bit in the MAC control register
- */
-static void disable_mdi(struct at91_private *lp)
-{
- unsigned long ctl;
-
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
-}
-
-/*
- * Wait until the PHY operation is complete.
- */
-static inline void at91_phy_wait(struct at91_private *lp)
-{
- unsigned long timeout = jiffies + 2;
-
- while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
- if (time_after(jiffies, timeout)) {
- printk("at91_ether: MIO timeout\n");
- break;
- }
- cpu_relax();
- }
-}
-
-/*
- * Write value to the a PHY register
- * Note: MDI interface is assumed to already have been enabled.
- */
-static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value)
-{
- at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
- | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
-
- /* Wait until IDLE bit in Network Status register is cleared */
- at91_phy_wait(lp);
-}
-
-/*
- * Read value stored in a PHY register.
- * Note: MDI interface is assumed to already have been enabled.
- */
-static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value)
-{
- at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
- | ((phy_addr & 0x1f) << 23) | (address << 18));
-
- /* Wait until IDLE bit in Network Status register is cleared */
- at91_phy_wait(lp);
-
- *value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA;
-}
-
-/* ........................... PHY MANAGEMENT .......................... */
-
-/*
- * Access the PHY to determine the current link speed and mode, and update the
- * MAC accordingly.
- * If no link or auto-negotiation is busy, then no changes are made.
- */
-static void update_linkspeed(struct net_device *dev, int silent)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int bmsr, bmcr, lpa, mac_cfg;
- unsigned int speed, duplex;
-
- if (!mii_link_ok(&lp->mii)) { /* no link */
- netif_carrier_off(dev);
- if (!silent)
- printk(KERN_INFO "%s: Link down.\n", dev->name);
- return;
- }
-
- /* Link up, or auto-negotiation still in progress */
- read_phy(lp, lp->phy_address, MII_BMSR, &bmsr);
- read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
- if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
- if (!(bmsr & BMSR_ANEGCOMPLETE))
- return; /* Do nothing - another interrupt generated when negotiation complete */
-
- read_phy(lp, lp->phy_address, MII_LPA, &lpa);
- if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
- else speed = SPEED_10;
- if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
- else duplex = DUPLEX_HALF;
- } else {
- speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
- duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
- }
-
- /* Update the MAC */
- mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
- if (speed == SPEED_100) {
- if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
- mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
- else /* 100 Half Duplex */
- mac_cfg |= AT91_EMAC_SPD;
- } else {
- if (duplex == DUPLEX_FULL) /* 10 Full Duplex */
- mac_cfg |= AT91_EMAC_FD;
- else {} /* 10 Half Duplex */
- }
- at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg);
-
- if (!silent)
- printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
- netif_carrier_on(dev);
-}
-
-/*
- * Handle interrupts from the PHY
- */
-static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
- unsigned int phy;
-
- /*
- * This hander is triggered on both edges, but the PHY chips expect
- * level-triggering. We therefore have to check if the PHY actually has
- * an IRQ pending.
- */
- enable_mdi(lp);
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
- read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
- if (!(phy & (1 << 0)))
- goto done;
- }
- else if (lp->phy_type == MII_LXT971A_ID) {
- read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
- if (!(phy & (1 << 2)))
- goto done;
- }
- else if (lp->phy_type == MII_BCM5221_ID) {
- read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
- if (!(phy & (1 << 0)))
- goto done;
- }
- else if (lp->phy_type == MII_KS8721_ID) {
- read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
- if (!(phy & ((1 << 2) | 1)))
- goto done;
- }
- else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
- read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy);
- if (!(phy & ((1 << 2) | 1)))
- goto done;
- }
- else if (lp->phy_type == MII_DP83848_ID) {
- read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
- if (!(phy & (1 << 7)))
- goto done;
- }
-
- update_linkspeed(dev, 0);
-
-done:
- disable_mdi(lp);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Initialize and enable the PHY interrupt for link-state changes
- */
-static void enable_phyirq(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int dsintr, irq_number;
- int status;
-
- if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
- /*
- * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
- * or board does not have it connected.
- */
- mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
- return;
- }
-
- irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
- status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
- if (status) {
- printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
- return;
- }
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
- read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
- dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
- write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
- }
- else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
- read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
- dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
- write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
- }
- else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
- dsintr = (1 << 15) | ( 1 << 14);
- write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
- }
- else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
- dsintr = (1 << 10) | ( 1 << 8);
- write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
- }
- else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
- read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
- dsintr = dsintr | 0x500; /* set bits 8, 10 */
- write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
- }
- else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
- read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
- dsintr = dsintr | 0x3c; /* set bits 2..5 */
- write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
- read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
- dsintr = dsintr | 0x3; /* set bits 0,1 */
- write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
- }
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-}
-
-/*
- * Disable the PHY interrupt
- */
-static void disable_phyirq(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int dsintr;
- unsigned int irq_number;
-
- if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
- del_timer_sync(&lp->check_timer);
- return;
- }
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
- read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
- dsintr = dsintr | 0xf00; /* set bits 8..11 */
- write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
- }
- else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
- read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
- dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
- write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
- }
- else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
- read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr);
- dsintr = ~(1 << 14);
- write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
- }
- else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
- read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr);
- dsintr = ~((1 << 10) | (1 << 8));
- write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
- }
- else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
- read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
- dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
- write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
- }
- else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
- read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
- dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
- write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
- read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
- dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
- write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
- }
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
- free_irq(irq_number, dev); /* Free interrupt handler */
-}
-
-/*
- * Perform a software reset of the PHY.
- */
-#if 0
-static void reset_phy(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int bmcr;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- /* Perform PHY reset */
- write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET);
-
- /* Wait until PHY reset is complete */
- do {
- read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
- } while (!(bmcr & BMCR_RESET));
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-}
-#endif
-
-static void at91ether_check_link(unsigned long dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
-
- enable_mdi(lp);
- update_linkspeed(dev, 1);
- disable_mdi(lp);
-
- mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
-}
-
-/*
- * Perform any PHY-specific initialization.
- */
-static void __init initialize_phy(struct at91_private *lp)
-{
- unsigned int val;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
- read_phy(lp, lp->phy_address, MII_DSCR_REG, &val);
- if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
- lp->phy_media = PORT_FIBRE;
- } else if (machine_is_csb337()) {
- /* mix link activity status into LED2 link state */
- write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22);
- } else if (machine_is_ecbat91())
- write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A);
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-}
-
-/* ......................... ADDRESS MANAGEMENT ........................ */
-
-/*
- * NOTE: Your bootloader must always set the MAC address correctly before
- * booting into Linux.
- *
- * - It must always set the MAC address after reset, even if it doesn't
- * happen to access the Ethernet while it's booting. Some versions of
- * U-Boot on the AT91RM9200-DK do not do this.
- *
- * - Likewise it must store the addresses in the correct byte order.
- * MicroMonitor (uMon) on the CSB337 does this incorrectly (and
- * continues to do so, for bug-compatibility).
- */
-
-static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
-{
- char addr[6];
-
- if (machine_is_csb337()) {
- addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */
- addr[4] = (lo & 0xff00) >> 8;
- addr[3] = (lo & 0xff0000) >> 16;
- addr[2] = (lo & 0xff000000) >> 24;
- addr[1] = (hi & 0xff);
- addr[0] = (hi & 0xff00) >> 8;
- }
- else {
- addr[0] = (lo & 0xff);
- addr[1] = (lo & 0xff00) >> 8;
- addr[2] = (lo & 0xff0000) >> 16;
- addr[3] = (lo & 0xff000000) >> 24;
- addr[4] = (hi & 0xff);
- addr[5] = (hi & 0xff00) >> 8;
- }
-
- if (is_valid_ether_addr(addr)) {
- memcpy(dev->dev_addr, &addr, 6);
- return 1;
- }
- return 0;
-}
-
-/*
- * Set the ethernet MAC address in dev->dev_addr
- */
-static void __init get_mac_address(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
-
- /* Check Specific-Address 1 */
- if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L)))
- return;
- /* Check Specific-Address 2 */
- if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L)))
- return;
- /* Check Specific-Address 3 */
- if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L)))
- return;
- /* Check Specific-Address 4 */
- if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L)))
- return;
-
- printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
-}
-
-/*
- * Program the hardware MAC address from dev->dev_addr.
- */
-static void update_mac_address(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
-
- at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
- at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
-
- at91_emac_write(lp, AT91_EMAC_SA2L, 0);
- at91_emac_write(lp, AT91_EMAC_SA2H, 0);
-}
-
-/*
- * Store the new hardware address in dev->dev_addr, and update the MAC.
- */
-static int set_mac_address(struct net_device *dev, void* addr)
-{
- struct sockaddr *address = addr;
-
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
- update_mac_address(dev);
+#include "macb.h"
- printk("%s: Setting MAC address to %pM\n", dev->name,
- dev->dev_addr);
+/* 1518 rounded up */
+#define MAX_RBUFF_SZ 0x600
+/* max number of receive buffers */
+#define MAX_RX_DESCR 9
- return 0;
-}
-
-static int inline hash_bit_value(int bitnr, __u8 *addr)
-{
- if (addr[bitnr / 8] & (1 << (bitnr % 8)))
- return 1;
- return 0;
-}
-
-/*
- * The hash address register is 64 bits long and takes up two locations in the memory map.
- * The least significant bits are stored in EMAC_HSL and the most significant
- * bits in EMAC_HSH.
- *
- * The unicast hash enable and the multicast hash enable bits in the network configuration
- * register enable the reception of hash matched frames. The destination address is
- * reduced to a 6 bit index into the 64 bit hash register using the following hash function.
- * The hash function is an exclusive or of every sixth bit of the destination address.
- * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
- * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
- * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
- * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
- * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
- * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
- * da[0] represents the least significant bit of the first byte received, that is, the multicast/
- * unicast indicator, and da[47] represents the most significant bit of the last byte
- * received.
- * If the hash index points to a bit that is set in the hash register then the frame will be
- * matched according to whether the frame is multicast or unicast.
- * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
- * the hash index points to a bit set in the hash register.
- * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
- * hash index points to a bit set in the hash register.
- * To receive all multicast frames, the hash register should be set with all ones and the
- * multicast hash enable bit should be set in the network configuration register.
- */
-
-/*
- * Return the hash index value for the specified address.
- */
-static int hash_get_index(__u8 *addr)
-{
- int i, j, bitval;
- int hash_index = 0;
-
- for (j = 0; j < 6; j++) {
- for (i = 0, bitval = 0; i < 8; i++)
- bitval ^= hash_bit_value(i*6 + j, addr);
-
- hash_index |= (bitval << j);
- }
-
- return hash_index;
-}
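A quick worked example of the hash described above (illustration only, using the hash_get_index() helper being removed here): for the broadcast address ff:ff:ff:ff:ff:ff every da[n] bit is 1, so each of the six index bits is the XOR of eight ones, i.e. 0, and broadcast frames map to bit 0 of the 64-bit hash register.

	u8 bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int idx = hash_get_index(bcast);	/* each bit is the XOR of eight 1s, so idx == 0 */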
-
-/*
- * Add multicast addresses to the internal multicast-hash table.
- */
-static void at91ether_sethashtable(struct net_device *dev)
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- unsigned long mc_filter[2];
- unsigned int bitnr;
-
- mc_filter[0] = mc_filter[1] = 0;
-
- netdev_for_each_mc_addr(ha, dev) {
- bitnr = hash_get_index(ha->addr);
- mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
- }
-
- at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]);
- at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]);
-}
+ struct macb *lp = netdev_priv(dev);
+ dma_addr_t addr;
+ u32 ctl;
+ int i;
-/*
- * Enable/Disable promiscuous and multicast modes.
- */
-static void at91ether_set_multicast_list(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned long cfg;
-
- cfg = at91_emac_read(lp, AT91_EMAC_CFG);
-
- if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
- cfg |= AT91_EMAC_CAF;
- else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */
- cfg &= ~AT91_EMAC_CAF;
-
- if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
- at91_emac_write(lp, AT91_EMAC_HSH, -1);
- at91_emac_write(lp, AT91_EMAC_HSL, -1);
- cfg |= AT91_EMAC_MTI;
- } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
- at91ether_sethashtable(dev);
- cfg |= AT91_EMAC_MTI;
- } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
- at91_emac_write(lp, AT91_EMAC_HSH, 0);
- at91_emac_write(lp, AT91_EMAC_HSL, 0);
- cfg &= ~AT91_EMAC_MTI;
+ lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ &lp->rx_ring_dma, GFP_KERNEL);
+ if (!lp->rx_ring) {
+ netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
+ return -ENOMEM;
}
- at91_emac_write(lp, AT91_EMAC_CFG, cfg);
-}
-
-/* ......................... ETHTOOL SUPPORT ........................... */
-
-static int mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int value;
-
- read_phy(lp, phy_id, location, &value);
- return value;
-}
-
-static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
-{
- struct at91_private *lp = netdev_priv(dev);
-
- write_phy(lp, phy_id, location, value);
-}
-
-static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct at91_private *lp = netdev_priv(dev);
- int ret;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- ret = mii_ethtool_gset(&lp->mii, cmd);
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
+ lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ &lp->rx_buffers_dma, GFP_KERNEL);
+ if (!lp->rx_buffers) {
+ netdev_err(dev, "unable to alloc rx data DMA buffer\n");
- if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
- cmd->supported = SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+ return -ENOMEM;
}
- return ret;
-}
-
-static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct at91_private *lp = netdev_priv(dev);
- int ret;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- ret = mii_ethtool_sset(&lp->mii, cmd);
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- return ret;
-}
-
-static int at91ether_nwayreset(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- int ret;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- ret = mii_nway_restart(&lp->mii);
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- return ret;
-}
-
-static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
-}
-
-static const struct ethtool_ops at91ether_ethtool_ops = {
- .get_settings = at91ether_get_settings,
- .set_settings = at91ether_set_settings,
- .get_drvinfo = at91ether_get_drvinfo,
- .nway_reset = at91ether_nwayreset,
- .get_link = ethtool_op_get_link,
-};
-
-static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct at91_private *lp = netdev_priv(dev);
- int res;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
- res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- return res;
-}
-
-/* ................................ MAC ................................ */
-
-/*
- * Initialize and start the Receiver and Transmit subsystems
- */
-static void at91ether_start(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- struct recv_desc_bufs *dlist, *dlist_phys;
- int i;
- unsigned long ctl;
-
- dlist = lp->dlist;
- dlist_phys = lp->dlist_phys;
-
+ addr = lp->rx_buffers_dma;
for (i = 0; i < MAX_RX_DESCR; i++) {
- dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0];
- dlist->descriptors[i].size = 0;
+ lp->rx_ring[i].addr = addr;
+ lp->rx_ring[i].ctrl = 0;
+ addr += MAX_RBUFF_SZ;
}
/* Set the Wrap bit on the last descriptor */
- dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP;
+ lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
- lp->rxBuffIndex = 0;
+ lp->rx_tail = 0;
/* Program address of descriptor list in Rx Buffer Queue register */
- at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys);
+ macb_writel(lp, RBQP, lp->rx_ring_dma);
/* Enable Receive and Transmit */
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+
+ return 0;
}
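For scale (derived from the constants above, not stated in the patch): the receive side set up here uses MAX_RX_DESCR * MAX_RBUFF_SZ = 9 * 1536 = 13824 bytes of coherent packet buffer plus nine struct macb_dma_desc entries, allocated in the two dma_alloc_coherent() calls above and released again in at91ether_close() below.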
-/*
- * Open the ethernet interface
- */
+/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
- unsigned long ctl;
+ struct macb *lp = netdev_priv(dev);
+ u32 ctl;
+ int ret;
if (!is_valid_ether_addr(dev->dev_addr))
return -EADDRNOTAVAIL;
- clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
-
/* Clear internal statistics */
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
- /* Update the MAC address (incase user has changed it) */
- update_mac_address(dev);
+ macb_set_hwaddr(lp);
- /* Enable PHY interrupt */
- enable_phyirq(dev);
+ ret = at91ether_start(dev);
+ if (ret)
+ return ret;
/* Enable MAC interrupts */
- at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
- | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
- | AT91_EMAC_ROVR | AT91_EMAC_ABT);
-
- /* Determine current link speed */
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
- update_linkspeed(dev, 0);
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- at91ether_start(dev);
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* schedule a link state check */
+ phy_start(lp->phy_dev);
+
netif_start_queue(dev);
+
return 0;
}
-/*
- * Close the interface
- */
+/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
- unsigned long ctl;
+ struct macb *lp = netdev_priv(dev);
+ u32 ctl;
/* Disable Receiver and Transmitter */
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
-
- /* Disable PHY interrupt */
- disable_phyirq(dev);
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
/* Disable MAC interrupts */
- at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
- | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
- | AT91_EMAC_ROVR | AT91_EMAC_ABT);
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
netif_stop_queue(dev);
- clk_disable(lp->ether_clk); /* Disable Peripheral clock */
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ lp->rx_buffers, lp->rx_buffers_dma);
+ lp->rx_buffers = NULL;
return 0;
}
-/*
- * Transmit packet.
- */
+/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
+ struct macb *lp = netdev_priv(dev);
- if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
+ if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
netif_stop_queue(dev);
/* Store packet information (to free when Tx completed) */
lp->skb = skb;
lp->skb_length = skb->len;
- lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
- dev->stats.tx_bytes += skb->len;
+ lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+ DMA_TO_DEVICE);
/* Set address of the data in the Transmit Address register */
- at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr);
+ macb_writel(lp, TAR, lp->skb_physaddr);
/* Set length of the packet in the Transmit Control register */
- at91_emac_write(lp, AT91_EMAC_TCR, skb->len);
+ macb_writel(lp, TCR, skb->len);
} else {
- printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
- return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
- on this skb, he also reports -ENETDOWN and printk's, so either
- we free and return(0) or don't free and return 1 */
+ netdev_err(dev, "%s called, but device is busy!\n", __func__);
+ return NETDEV_TX_BUSY;
}
return NETDEV_TX_OK;
}
-/*
- * Update the current statistics from the internal statistics registers.
- */
-static struct net_device_stats *at91ether_stats(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- int ale, lenerr, seqe, lcol, ecol;
-
- if (netif_running(dev)) {
- dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK); /* Good frames received */
- ale = at91_emac_read(lp, AT91_EMAC_ALE);
- dev->stats.rx_frame_errors += ale; /* Alignment errors */
- lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF);
- dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
- seqe = at91_emac_read(lp, AT91_EMAC_SEQE);
- dev->stats.rx_crc_errors += seqe; /* CRC error */
- dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */
- dev->stats.rx_errors += (ale + lenerr + seqe
- + at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB));
-
- dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA); /* Frames successfully transmitted */
- dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE); /* Transmit FIFO underruns */
- dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE); /* Carrier Sense errors */
- dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */
-
- lcol = at91_emac_read(lp, AT91_EMAC_LCOL);
- ecol = at91_emac_read(lp, AT91_EMAC_ECOL);
- dev->stats.tx_window_errors += lcol; /* Late collisions */
- dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
-
- dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol);
- }
- return &dev->stats;
-}
-
-/*
- * Extract received frame from buffer descriptors and sent to upper layers.
+/* Extract received frame from buffer descriptors and send to upper layers.
* (Called from interrupt context)
*/
static void at91ether_rx(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
- struct recv_desc_bufs *dlist;
+ struct macb *lp = netdev_priv(dev);
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
- dlist = lp->dlist;
- while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
- p_recv = dlist->recv_buf[lp->rxBuffIndex];
- pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
+ while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+ p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
+ pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
- if (skb != NULL) {
+ if (skb) {
skb_reserve(skb, 2);
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_bytes += pktlen;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
netif_rx(skb);
+ } else {
+ lp->stats.rx_dropped++;
+ netdev_notice(dev, "Memory squeeze, dropping packet.\n");
}
- else {
- dev->stats.rx_dropped += 1;
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- }
- if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
- dev->stats.multicast++;
+ if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+ lp->stats.multicast++;
+
+ /* reset ownership bit */
+ lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
- dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */
- if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */
- lp->rxBuffIndex = 0;
+ /* wrap after last buffer */
+ if (lp->rx_tail == MAX_RX_DESCR - 1)
+ lp->rx_tail = 0;
else
- lp->rxBuffIndex++;
+ lp->rx_tail++;
}
}
-/*
- * MAC interrupt handler
- */
+/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
- unsigned long intstatus, ctl;
+ struct net_device *dev = dev_id;
+ struct macb *lp = netdev_priv(dev);
+ u32 intstatus, ctl;
/* MAC Interrupt Status register indicates what interrupts are pending.
- It is automatically cleared once read. */
- intstatus = at91_emac_read(lp, AT91_EMAC_ISR);
+ * It is automatically cleared once read.
+ */
+ intstatus = macb_readl(lp, ISR);
- if (intstatus & AT91_EMAC_RCOM) /* Receive complete */
+ /* Receive complete */
+ if (intstatus & MACB_BIT(RCOMP))
at91ether_rx(dev);
- if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
- /* The TCOM bit is set even if the transmission failed. */
- if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
- dev->stats.tx_errors += 1;
+ /* Transmit complete */
+ if (intstatus & MACB_BIT(TCOMP)) {
+ /* The TCOM bit is set even if the transmission failed */
+ if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+ lp->stats.tx_errors++;
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
lp->skb = NULL;
dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += lp->skb_length;
}
netif_wake_queue(dev);
}
- /* Work-around for Errata #11 */
- if (intstatus & AT91_EMAC_RBNA) {
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
+ /* Work-around for EMAC Errata section 41.3.1 */
+ if (intstatus & MACB_BIT(RXUBR)) {
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+ macb_writel(lp, NCR, ctl | MACB_BIT(RE));
}
- if (intstatus & AT91_EMAC_ROVR)
- printk("%s: ROVR error\n", dev->name);
+ if (intstatus & MACB_BIT(ISR_ROVR))
+ netdev_err(dev, "ROVR error\n");
return IRQ_HANDLED;
}
@@ -1000,10 +289,10 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = at91ether_stats,
- .ndo_set_rx_mode = at91ether_set_multicast_list,
- .ndo_set_mac_address = set_mac_address,
- .ndo_do_ioctl = at91ether_ioctl,
+ .ndo_get_stats = macb_get_stats,
+ .ndo_set_rx_mode = macb_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1011,197 +300,160 @@ static const struct net_device_ops at91ether_netdev_ops = {
#endif
};
-/*
- * Detect the PHY type, and its address.
- */
-static int __init at91ether_phy_detect(struct at91_private *lp)
+#if defined(CONFIG_OF)
+static const struct of_device_id at91ether_dt_ids[] = {
+ { .compatible = "cdns,at91rm9200-emac" },
+ { .compatible = "cdns,emac" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
+
+static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
{
- unsigned int phyid1, phyid2;
- unsigned long phy_id;
- unsigned short phy_address = 0;
-
- while (phy_address < PHY_MAX_ADDR) {
- /* Read the PHY ID registers */
- enable_mdi(lp);
- read_phy(lp, phy_address, MII_PHYSID1, &phyid1);
- read_phy(lp, phy_address, MII_PHYSID2, &phyid2);
- disable_mdi(lp);
-
- phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
- switch (phy_id) {
- case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
- case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
- case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
- case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
- case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
- case MII_DP83847_ID: /* National Semiconductor DP83847: */
- case MII_DP83848_ID: /* National Semiconductor DP83848: */
- case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
- case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
- case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
- case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
- /* store detected values */
- lp->phy_type = phy_id; /* Type of PHY connected */
- lp->phy_address = phy_address; /* MDI address of PHY */
- return 1;
- }
+ struct device_node *np = pdev->dev.of_node;
- phy_address++;
- }
+ if (np)
+ return of_get_phy_mode(np);
- return 0; /* not detected */
+ return -ENODEV;
}
+static int at91ether_get_hwaddr_dt(struct macb *bp)
+{
+ struct device_node *np = bp->pdev->dev.of_node;
-/*
- * Detect MAC & PHY and perform ethernet interface initialization
- */
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac) {
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+#else
+static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+static int at91ether_get_hwaddr_dt(struct macb *bp)
+{
+ return -ENODEV;
+}
+#endif
+
+/* Detect MAC & PHY and perform ethernet interface initialization */
static int __init at91ether_probe(struct platform_device *pdev)
{
struct macb_platform_data *board_data = pdev->dev.platform_data;
struct resource *regs;
struct net_device *dev;
- struct at91_private *lp;
+ struct phy_device *phydev;
+ struct pinctrl *pinctrl;
+ struct macb *lp;
int res;
+ u32 reg;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENOENT;
- dev = alloc_etherdev(sizeof(struct at91_private));
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ res = PTR_ERR(pinctrl);
+ if (res == -EPROBE_DEFER)
+ return res;
+
+ dev_warn(&pdev->dev, "No pinctrl provided\n");
+ }
+
+ dev = alloc_etherdev(sizeof(struct macb));
if (!dev)
return -ENOMEM;
lp = netdev_priv(dev);
- lp->board_data = *board_data;
+ lp->pdev = pdev;
+ lp->dev = dev;
spin_lock_init(&lp->lock);
- dev->base_addr = regs->start; /* physical base address */
- lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1);
- if (!lp->emac_base) {
+ /* physical base address */
+ dev->base_addr = regs->start;
+ lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!lp->regs) {
res = -ENOMEM;
goto err_free_dev;
}
/* Clock */
- lp->ether_clk = clk_get(&pdev->dev, "ether_clk");
- if (IS_ERR(lp->ether_clk)) {
- res = PTR_ERR(lp->ether_clk);
- goto err_ioumap;
+ lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
+ if (IS_ERR(lp->pclk)) {
+ res = PTR_ERR(lp->pclk);
+ goto err_free_dev;
}
- clk_enable(lp->ether_clk);
+ clk_enable(lp->pclk);
/* Install the interrupt handler */
dev->irq = platform_get_irq(pdev, 0);
- if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
- res = -EBUSY;
+ res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
+ if (res)
goto err_disable_clock;
- }
-
- /* Allocate memory for DMA Receive descriptors */
- lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
- if (lp->dlist == NULL) {
- res = -ENOMEM;
- goto err_free_irq;
- }
ether_setup(dev);
dev->netdev_ops = &at91ether_netdev_ops;
- dev->ethtool_ops = &at91ether_ethtool_ops;
+ dev->ethtool_ops = &macb_ethtool_ops;
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */
- update_mac_address(dev); /* Program ethernet address into MAC */
-
- at91_emac_write(lp, AT91_EMAC_CTL, 0);
+ res = at91ether_get_hwaddr_dt(lp);
+ if (res < 0)
+ macb_get_hwaddr(lp);
- if (board_data->is_rmii)
- at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
- else
- at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
-
- /* Detect PHY */
- if (!at91ether_phy_detect(lp)) {
- printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n");
- res = -ENODEV;
- goto err_free_dmamem;
+ res = at91ether_get_phy_mode_dt(pdev);
+ if (res < 0) {
+ if (board_data && board_data->is_rmii)
+ lp->phy_interface = PHY_INTERFACE_MODE_RMII;
+ else
+ lp->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ lp->phy_interface = res;
}
- initialize_phy(lp);
+ macb_writel(lp, NCR, 0);
+
+ reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+ if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
+ reg |= MACB_BIT(RM9200_RMII);
- lp->mii.dev = dev; /* Support for ethtool */
- lp->mii.mdio_read = mdio_read;
- lp->mii.mdio_write = mdio_write;
- lp->mii.phy_id = lp->phy_address;
- lp->mii.phy_id_mask = 0x1f;
- lp->mii.reg_num_mask = 0x1f;
+ macb_writel(lp, NCFGR, reg);
/* Register the network interface */
res = register_netdev(dev);
if (res)
- goto err_free_dmamem;
-
- /* Determine current link speed */
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
- update_linkspeed(dev, 0);
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
- netif_carrier_off(dev); /* will be enabled in open() */
-
- /* If board has no PHY IRQ, use a timer to poll the PHY */
- if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
- gpio_request(board_data->phy_irq_pin, "ethernet_phy");
- } else {
- /* If board has no PHY IRQ, use a timer to poll the PHY */
- init_timer(&lp->check_timer);
- lp->check_timer.data = (unsigned long)dev;
- lp->check_timer.function = at91ether_check_link;
- }
+ goto err_disable_clock;
+
+ if (macb_mii_init(lp) != 0)
+ goto err_out_unregister_netdev;
+
+ /* will be enabled in open() */
+ netif_carrier_off(dev);
+
+ phydev = lp->phy_dev;
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev),
+ phydev->irq);
/* Display ethernet banner */
- printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
- dev->name, (uint) dev->base_addr, dev->irq,
- at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
- at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
- dev->dev_addr);
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
- printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
- else if (lp->phy_type == MII_LXT971A_ID)
- printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
- else if (lp->phy_type == MII_RTL8201_ID)
- printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
- else if (lp->phy_type == MII_BCM5221_ID)
- printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
- else if (lp->phy_type == MII_DP83847_ID)
- printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
- else if (lp->phy_type == MII_DP83848_ID)
- printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
- else if (lp->phy_type == MII_AC101L_ID)
- printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
- else if (lp->phy_type == MII_KS8721_ID)
- printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
- else if (lp->phy_type == MII_T78Q21x3_ID)
- printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
- else if (lp->phy_type == MII_LAN83C185_ID)
- printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
-
- clk_disable(lp->ether_clk); /* Disable Peripheral clock */
+ netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
+ dev->base_addr, dev->irq, dev->dev_addr);
return 0;
-
-err_free_dmamem:
- platform_set_drvdata(pdev, NULL);
- dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
-err_free_irq:
- free_irq(dev->irq, dev);
+err_out_unregister_netdev:
+ unregister_netdev(dev);
err_disable_clock:
- clk_disable(lp->ether_clk);
- clk_put(lp->ether_clk);
-err_ioumap:
- iounmap(lp->emac_base);
+ clk_disable(lp->pclk);
err_free_dev:
free_netdev(dev);
return res;
@@ -1210,38 +462,33 @@ err_free_dev:
static int __devexit at91ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct at91_private *lp = netdev_priv(dev);
+ struct macb *lp = netdev_priv(dev);
- if (gpio_is_valid(lp->board_data.phy_irq_pin))
- gpio_free(lp->board_data.phy_irq_pin);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
- dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
- clk_put(lp->ether_clk);
-
- platform_set_drvdata(pdev, NULL);
+ clk_disable(lp->pclk);
free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
#ifdef CONFIG_PM
-
static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
- struct at91_private *lp = netdev_priv(net_dev);
+ struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
- if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
- int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
- disable_irq(phy_irq);
- }
-
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
- clk_disable(lp->ether_clk);
+ clk_disable(lp->pclk);
}
return 0;
}
@@ -1249,22 +496,16 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
static int at91ether_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
- struct at91_private *lp = netdev_priv(net_dev);
+ struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
- clk_enable(lp->ether_clk);
+ clk_enable(lp->pclk);
netif_device_attach(net_dev);
netif_start_queue(net_dev);
-
- if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
- int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
- enable_irq(phy_irq);
- }
}
return 0;
}
-
#else
#define at91ether_suspend NULL
#define at91ether_resume NULL
@@ -1275,8 +516,9 @@ static struct platform_driver at91ether_driver = {
.suspend = at91ether_suspend,
.resume = at91ether_resume,
.driver = {
- .name = DRV_NAME,
+ .name = "at91_ether",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91ether_dt_ids),
},
};
@@ -1296,4 +538,4 @@ module_exit(at91ether_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
MODULE_AUTHOR("Andrew Victor");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:at91_ether");
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
deleted file mode 100644
index 0ef6328..0000000
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Ethernet driver for the Atmel AT91RM9200 (Thunder)
- *
- * Copyright (C) SAN People (Pty) Ltd
- *
- * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
- * Initial version by Rick Bronson.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef AT91_ETHERNET
-#define AT91_ETHERNET
-
-
-/* Davicom 9161 PHY */
-#define MII_DM9161_ID 0x0181b880
-#define MII_DM9161A_ID 0x0181b8a0
-#define MII_DSCR_REG 16
-#define MII_DSCSR_REG 17
-#define MII_DSINTR_REG 21
-
-/* Intel LXT971A PHY */
-#define MII_LXT971A_ID 0x001378E0
-#define MII_ISINTE_REG 18
-#define MII_ISINTS_REG 19
-#define MII_LEDCTRL_REG 20
-
-/* Realtek RTL8201 PHY */
-#define MII_RTL8201_ID 0x00008200
-
-/* Broadcom BCM5221 PHY */
-#define MII_BCM5221_ID 0x004061e0
-#define MII_BCMINTR_REG 26
-
-/* National Semiconductor DP83847 */
-#define MII_DP83847_ID 0x20005c30
-
-/* National Semiconductor DP83848 */
-#define MII_DP83848_ID 0x20005c90
-#define MII_DPPHYSTS_REG 16
-#define MII_DPMICR_REG 17
-#define MII_DPMISR_REG 18
-
-/* Altima AC101L PHY */
-#define MII_AC101L_ID 0x00225520
-
-/* Micrel KS8721 PHY */
-#define MII_KS8721_ID 0x00221610
-
-/* Teridian 78Q2123/78Q2133 */
-#define MII_T78Q21x3_ID 0x000e7230
-#define MII_T78Q21INT_REG 17
-
-/* SMSC LAN83C185 */
-#define MII_LAN83C185_ID 0x0007C0A0
-
-/* ........................................................................ */
-
-#define MAX_RBUFF_SZ 0x600 /* 1518 rounded up */
-#define MAX_RX_DESCR 9 /* max number of receive buffers */
-
-#define EMAC_DESC_DONE 0x00000001 /* bit for if DMA is done */
-#define EMAC_DESC_WRAP 0x00000002 /* bit for wrap */
-
-#define EMAC_BROADCAST 0x80000000 /* broadcast address */
-#define EMAC_MULTICAST 0x40000000 /* multicast address */
-#define EMAC_UNICAST 0x20000000 /* unicast address */
-
-struct rbf_t
-{
- unsigned int addr;
- unsigned long size;
-};
-
-struct recv_desc_bufs
-{
- struct rbf_t descriptors[MAX_RX_DESCR]; /* must be on sizeof (rbf_t) boundary */
- char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ]; /* must be on long boundary */
-};
-
-struct at91_private
-{
- struct mii_if_info mii; /* ethtool support */
- struct macb_platform_data board_data; /* board-specific
- * configuration (shared with
- * macb for common data */
- void __iomem *emac_base; /* base register address */
- struct clk *ether_clk; /* clock */
-
- /* PHY */
- unsigned long phy_type; /* type of PHY (PHY_ID) */
- spinlock_t lock; /* lock for MDI interface */
- short phy_media; /* media interface type */
- unsigned short phy_address; /* 5-bit MDI address of PHY (0..31) */
- struct timer_list check_timer; /* Poll link status */
-
- /* Transmit */
- struct sk_buff *skb; /* holds skb until xmit interrupt completes */
- dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
- int skb_length; /* saved skb length for pci_unmap_single */
-
- /* Receive */
- int rxBuffIndex; /* index into receive descriptor list */
- struct recv_desc_bufs *dlist; /* descriptor list address */
- struct recv_desc_bufs *dlist_phys; /* descriptor list physical address */
-};
-
-#endif
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 033064b..edb2aba 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -26,37 +27,79 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
#include "macb.h"
#define RX_BUFFER_SIZE 128
-#define RX_RING_SIZE 512
-#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)
+#define RX_RING_SIZE 512 /* must be power of 2 */
+#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
-/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
-#define RX_OFFSET 2
-
-#define TX_RING_SIZE 128
-#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1)
-#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)
-
-#define TX_RING_GAP(bp) \
- (TX_RING_SIZE - (bp)->tx_pending)
-#define TX_BUFFS_AVAIL(bp) \
- (((bp)->tx_tail <= (bp)->tx_head) ? \
- (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \
- (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
-#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))
-
-#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1))
+#define TX_RING_SIZE 128 /* must be power of 2 */
+#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
/* minimum number of free TX descriptors before waking up TX process */
#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4)
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
| MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
+ | MACB_BIT(ISR_RLE) \
+ | MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT 1230
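The value 1230 presumably corresponds to one maximum-size frame on the wire at 10 Mbit/s, as the comment suggests: 1518 bytes of frame plus 8 bytes of preamble/SFD and a 12-byte inter-frame gap is 1538 bytes = 12304 bits, and 12304 bits / 10 Mbit/s ≈ 1230 µs.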
+
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+ return index & (TX_RING_SIZE - 1);
+}
+
+static unsigned int macb_tx_ring_avail(struct macb *bp)
+{
+ return (bp->tx_tail - bp->tx_head) & (TX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+ return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
-static void __macb_set_hwaddr(struct macb *bp)
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+ return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+{
+ dma_addr_t offset;
+
+ offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+
+ return bp->tx_ring_dma + offset;
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+ return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+ return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+ return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
+}
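The accessors above rely on the ring sizes being powers of two, so masking with (SIZE - 1) is equivalent to a modulo and the head/tail indices can simply keep incrementing. A standalone sketch of the wrap identity (illustration only, not part of the driver):

#include <assert.h>

#define RING_SIZE 128	/* must be a power of 2, as the defines above require */

static unsigned int ring_wrap(unsigned int index)
{
	return index & (RING_SIZE - 1);	/* same result as index % RING_SIZE */
}

int main(void)
{
	/* indices keep growing; the mask folds them back into the ring */
	assert(ring_wrap(127) == 127);
	assert(ring_wrap(128) == 0);
	assert(ring_wrap(130) == 2);
	return 0;
}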
+
+void macb_set_hwaddr(struct macb *bp)
{
u32 bottom;
u16 top;
@@ -65,31 +108,58 @@ static void __macb_set_hwaddr(struct macb *bp)
macb_or_gem_writel(bp, SA1B, bottom);
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);
+
+ /* Clear unused address register sets */
+ macb_or_gem_writel(bp, SA2B, 0);
+ macb_or_gem_writel(bp, SA2T, 0);
+ macb_or_gem_writel(bp, SA3B, 0);
+ macb_or_gem_writel(bp, SA3T, 0);
+ macb_or_gem_writel(bp, SA4B, 0);
+ macb_or_gem_writel(bp, SA4T, 0);
}
+EXPORT_SYMBOL_GPL(macb_set_hwaddr);
-static void __init macb_get_hwaddr(struct macb *bp)
+void macb_get_hwaddr(struct macb *bp)
{
+ struct macb_platform_data *pdata;
u32 bottom;
u16 top;
u8 addr[6];
+ int i;
- bottom = macb_or_gem_readl(bp, SA1B);
- top = macb_or_gem_readl(bp, SA1T);
+ pdata = bp->pdev->dev.platform_data;
- addr[0] = bottom & 0xff;
- addr[1] = (bottom >> 8) & 0xff;
- addr[2] = (bottom >> 16) & 0xff;
- addr[3] = (bottom >> 24) & 0xff;
- addr[4] = top & 0xff;
- addr[5] = (top >> 8) & 0xff;
+ /* Check all 4 address register sets for a valid address */
+ for (i = 0; i < 4; i++) {
+ bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+ top = macb_or_gem_readl(bp, SA1T + i * 8);
+
+ if (pdata && pdata->rev_eth_addr) {
+ addr[5] = bottom & 0xff;
+ addr[4] = (bottom >> 8) & 0xff;
+ addr[3] = (bottom >> 16) & 0xff;
+ addr[2] = (bottom >> 24) & 0xff;
+ addr[1] = top & 0xff;
+ addr[0] = (top & 0xff00) >> 8;
+ } else {
+ addr[0] = bottom & 0xff;
+ addr[1] = (bottom >> 8) & 0xff;
+ addr[2] = (bottom >> 16) & 0xff;
+ addr[3] = (bottom >> 24) & 0xff;
+ addr[4] = top & 0xff;
+ addr[5] = (top >> 8) & 0xff;
+ }
- if (is_valid_ether_addr(addr)) {
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
- } else {
- netdev_info(bp->dev, "invalid hw address, using random\n");
- eth_hw_addr_random(bp->dev);
+ if (is_valid_ether_addr(addr)) {
+ memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ return;
+ }
}
+
+ netdev_info(bp->dev, "invalid hw address, using random\n");
+ eth_hw_addr_random(bp->dev);
}
+EXPORT_SYMBOL_GPL(macb_get_hwaddr);
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
@@ -152,13 +222,17 @@ static void macb_handle_link_change(struct net_device *dev)
reg = macb_readl(bp, NCFGR);
reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
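+ /* on GEM also clear the gigabit bit so the speed bits start from a known state */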
+ if (macb_is_gem(bp))
+ reg &= ~GEM_BIT(GBE);
if (phydev->duplex)
reg |= MACB_BIT(FD);
if (phydev->speed == SPEED_100)
reg |= MACB_BIT(SPD);
+ if (phydev->speed == SPEED_1000)
+ reg |= GEM_BIT(GBE);
- macb_writel(bp, NCFGR, reg);
+ macb_or_gem_writel(bp, NCFGR, reg);
bp->speed = phydev->speed;
bp->duplex = phydev->duplex;
@@ -196,7 +270,9 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ struct macb_platform_data *pdata;
struct phy_device *phydev;
+ int phy_irq;
int ret;
phydev = phy_find_first(bp->mii_bus);
@@ -205,7 +281,14 @@ static int macb_mii_probe(struct net_device *dev)
return -1;
}
- /* TODO : add pin_irq */
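+ /* Optional PHY interrupt GPIO from platform data; fall back to polling if it cannot be mapped to an IRQ */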
+ pdata = dev_get_platdata(&bp->pdev->dev);
+ if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ }
+ }
/* attach the mac to the phy */
ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
@@ -216,7 +299,10 @@ static int macb_mii_probe(struct net_device *dev)
}
/* mask with MAC supported features */
- phydev->supported &= PHY_BASIC_FEATURES;
+ if (macb_is_gem(bp))
+ phydev->supported &= PHY_GBIT_FEATURES;
+ else
+ phydev->supported &= PHY_BASIC_FEATURES;
phydev->advertising = phydev->supported;
@@ -228,7 +314,7 @@ static int macb_mii_probe(struct net_device *dev)
return 0;
}
-static int macb_mii_init(struct macb *bp)
+int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
int err = -ENXIO, i;
@@ -284,6 +370,7 @@ err_out_free_mdiobus:
err_out:
return err;
}
+EXPORT_SYMBOL_GPL(macb_mii_init);
static void macb_update_stats(struct macb *bp)
{
@@ -297,93 +384,147 @@ static void macb_update_stats(struct macb *bp)
*p += __raw_readl(reg);
}
-static void macb_tx(struct macb *bp)
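+/*
+ * Ask the controller to halt transmission (THALT) and poll TSR until the
+ * transmit-go bit clears, giving up after MACB_HALT_TIMEOUT microseconds.
+ */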
+static int macb_halt_tx(struct macb *bp)
{
- unsigned int tail;
- unsigned int head;
- u32 status;
+ unsigned long halt_time, timeout;
+ u32 status;
- status = macb_readl(bp, TSR);
- macb_writel(bp, TSR, status);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
+ timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+ do {
+ halt_time = jiffies;
+ status = macb_readl(bp, TSR);
+ if (!(status & MACB_BIT(TGO)))
+ return 0;
- if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
- int i;
- netdev_err(bp->dev, "TX %s, resetting buffers\n",
- status & MACB_BIT(UND) ?
- "underrun" : "retry limit exceeded");
+ usleep_range(10, 250);
+ } while (time_before(halt_time, timeout));
- /* Transfer ongoing, disable transmitter, to avoid confusion */
- if (status & MACB_BIT(TGO))
- macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
+ return -ETIMEDOUT;
+}
- head = bp->tx_head;
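+/*
+ * Work queue handler for TX errors: with TX interrupts masked, halt the
+ * controller, clean up and unmap the whole TX ring, then reprogram TBQP and
+ * re-enable the TX interrupts so transmission can restart cleanly.
+ */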
+static void macb_tx_error_task(struct work_struct *work)
+{
+ struct macb *bp = container_of(work, struct macb, tx_error_task);
+ struct macb_tx_skb *tx_skb;
+ struct sk_buff *skb;
+ unsigned int tail;
- /*Mark all the buffer as used to avoid sending a lost buffer*/
- for (i = 0; i < TX_RING_SIZE; i++)
- bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+ bp->tx_tail, bp->tx_head);
- /* Add wrap bit */
- bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ /* Make sure nobody is trying to queue up new packets */
+ netif_stop_queue(bp->dev);
- /* free transmit buffer in upper layer*/
- for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
- struct ring_info *rp = &bp->tx_skb[tail];
- struct sk_buff *skb = rp->skb;
+ /*
+ * Stop transmission now
+ * (in case we have just queued new packets)
+ */
+ if (macb_halt_tx(bp))
+ /* Just complain for now; the TX path is reinitialized below anyway */
+ netdev_err(bp->dev, "BUG: halt tx timed out\n");
- BUG_ON(skb == NULL);
+ /* No need for the lock here as nobody will interrupt us anymore */
- rmb();
+ /*
+ * Clean up all frames still in the TX queue, including the one(s)
+ * that caused the error, and free the associated skbs.
+ */
+ for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+ struct macb_dma_desc *desc;
+ u32 ctrl;
+
+ desc = macb_tx_desc(bp, tail);
+ ctrl = desc->ctrl;
+ tx_skb = macb_tx_skb(bp, tail);
+ skb = tx_skb->skb;
+
+ if (ctrl & MACB_BIT(TX_USED)) {
+ netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+ macb_tx_ring_wrap(tail), skb->data);
+ bp->stats.tx_packets++;
+ bp->stats.tx_bytes += skb->len;
+ } else {
+ /*
+ * "Buffers exhausted mid-frame" errors may only happen
+ * if the driver is buggy, so complain loudly about those.
+ * Statistics are updated by hardware.
+ */
+ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+ netdev_err(bp->dev,
+ "BUG: TX buffers exhausted mid-frame\n");
- dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
- DMA_TO_DEVICE);
- rp->skb = NULL;
- dev_kfree_skb_irq(skb);
+ desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- bp->tx_head = bp->tx_tail = 0;
-
- /* Enable the transmitter again */
- if (status & MACB_BIT(TGO))
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+ DMA_TO_DEVICE);
+ tx_skb->skb = NULL;
+ dev_kfree_skb(skb);
}
- if (!(status & MACB_BIT(COMP)))
- /*
- * This may happen when a buffer becomes complete
- * between reading the ISR and scanning the
- * descriptors. Nothing to worry about.
- */
- return;
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ /* Reinitialize the TX desc queue */
+ macb_writel(bp, TBQP, bp->tx_ring_dma);
+ /* Make TX ring reflect state of hardware */
+ bp->tx_head = bp->tx_tail = 0;
+
+ /* Now we are ready to start transmission again */
+ netif_wake_queue(bp->dev);
+
+ /* Housework before enabling TX IRQ */
+ macb_writel(bp, TSR, macb_readl(bp, TSR));
+ macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
+{
+ unsigned int tail;
+ unsigned int head;
+ u32 status;
+
+ status = macb_readl(bp, TSR);
+ macb_writel(bp, TSR, status);
+
+ netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+ (unsigned long)status);
head = bp->tx_head;
- for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
- struct ring_info *rp = &bp->tx_skb[tail];
- struct sk_buff *skb = rp->skb;
- u32 bufstat;
+ for (tail = bp->tx_tail; tail != head; tail++) {
+ struct macb_tx_skb *tx_skb;
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+ u32 ctrl;
- BUG_ON(skb == NULL);
+ desc = macb_tx_desc(bp, tail);
+ /* Make hw descriptor updates visible to CPU */
rmb();
- bufstat = bp->tx_ring[tail].ctrl;
- if (!(bufstat & MACB_BIT(TX_USED)))
+ ctrl = desc->ctrl;
+
+ if (!(ctrl & MACB_BIT(TX_USED)))
break;
- netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
- tail, skb->data);
- dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+ tx_skb = macb_tx_skb(bp, tail);
+ skb = tx_skb->skb;
+
+ netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+ macb_tx_ring_wrap(tail), skb->data);
+ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
DMA_TO_DEVICE);
bp->stats.tx_packets++;
bp->stats.tx_bytes += skb->len;
- rp->skb = NULL;
+ tx_skb->skb = NULL;
dev_kfree_skb_irq(skb);
}
bp->tx_tail = tail;
- if (netif_queue_stopped(bp->dev) &&
- TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+ if (netif_queue_stopped(bp->dev)
+ && macb_tx_ring_avail(bp) > MACB_TX_WAKEUP_THRESH)
netif_wake_queue(bp->dev);
}
@@ -392,31 +533,48 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
{
unsigned int len;
unsigned int frag;
- unsigned int offset = 0;
+ unsigned int offset;
struct sk_buff *skb;
+ struct macb_dma_desc *desc;
- len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+ desc = macb_rx_desc(bp, last_frag);
+ len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
- netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- first_frag, last_frag, len);
+ netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ macb_rx_ring_wrap(first_frag),
+ macb_rx_ring_wrap(last_frag), len);
- skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
+ /*
+ * The ethernet header starts NET_IP_ALIGN bytes into the
+ * first buffer. Since the header is 14 bytes, this makes the
+ * payload word-aligned.
+ *
+ * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+ * the two padding bytes into the skb so that we avoid hitting
+ * the slowpath in memcpy(), and pull them off afterwards.
+ */
+ skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
if (!skb) {
bp->stats.rx_dropped++;
- for (frag = first_frag; ; frag = NEXT_RX(frag)) {
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ for (frag = first_frag; ; frag++) {
+ desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
}
+
+ /* Make descriptor updates visible to hardware */
wmb();
+
return 1;
}
- skb_reserve(skb, RX_OFFSET);
+ offset = 0;
+ len += NET_IP_ALIGN;
skb_checksum_none_assert(skb);
skb_put(skb, len);
- for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+ for (frag = first_frag; ; frag++) {
unsigned int frag_len = RX_BUFFER_SIZE;
if (offset + frag_len > len) {
@@ -424,22 +582,24 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- (bp->rx_buffers +
- (RX_BUFFER_SIZE * frag)),
- frag_len);
+ macb_rx_buffer(bp, frag), frag_len);
offset += RX_BUFFER_SIZE;
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
- wmb();
+ desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
}
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ __skb_pull(skb, NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, bp->dev);
bp->stats.rx_packets++;
- bp->stats.rx_bytes += len;
- netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ bp->stats.rx_bytes += skb->len;
+ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
netif_receive_skb(skb);
@@ -452,8 +612,12 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
{
unsigned int frag;
- for (frag = begin; frag != end; frag = NEXT_RX(frag))
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ for (frag = begin; frag != end; frag++) {
+ struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
+ }
+
+ /* Make descriptor updates visible to hardware */
wmb();
/*
@@ -466,15 +630,18 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
static int macb_rx(struct macb *bp, int budget)
{
int received = 0;
- unsigned int tail = bp->rx_tail;
+ unsigned int tail;
int first_frag = -1;
- for (; budget > 0; tail = NEXT_RX(tail)) {
+ for (tail = bp->rx_tail; budget > 0; tail++) {
+ struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
u32 addr, ctrl;
+ /* Make hw descriptor updates visible to CPU */
rmb();
- addr = bp->rx_ring[tail].addr;
- ctrl = bp->rx_ring[tail].ctrl;
+
+ addr = desc->addr;
+ ctrl = desc->ctrl;
if (!(addr & MACB_BIT(RX_USED)))
break;
@@ -517,7 +684,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
- netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+ netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
work_done = macb_rx(bp, budget);
@@ -552,10 +719,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
while (status) {
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
- macb_writel(bp, IDR, ~0UL);
+ macb_writel(bp, IDR, -1);
break;
}
+ netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
if (status & MACB_RX_INT_FLAGS) {
/*
* There's no point taking any more interrupts
@@ -567,14 +736,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
if (napi_schedule_prep(&bp->napi)) {
- netdev_dbg(bp->dev, "scheduling RX softirq\n");
+ netdev_vdbg(bp->dev, "scheduling RX softirq\n");
__napi_schedule(&bp->napi);
}
}
- if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE)))
- macb_tx(bp);
+ if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+ macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+ schedule_work(&bp->tx_error_task);
+ break;
+ }
+
+ if (status & MACB_BIT(TCOMP))
+ macb_tx_interrupt(bp);
/*
* Link change detection isn't possible with RMII, so we'll
@@ -626,11 +800,13 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct macb *bp = netdev_priv(dev);
dma_addr_t mapping;
unsigned int len, entry;
+ struct macb_dma_desc *desc;
+ struct macb_tx_skb *tx_skb;
u32 ctrl;
unsigned long flags;
-#ifdef DEBUG
- netdev_dbg(bp->dev,
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ netdev_vdbg(bp->dev,
"start_xmit: len %u head %p data %p tail %p end %p\n",
skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -642,7 +818,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
- if (TX_BUFFS_AVAIL(bp) < 1) {
+ if (macb_tx_ring_avail(bp) < 1) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&bp->lock, flags);
netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -651,13 +827,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- entry = bp->tx_head;
- netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
+ entry = macb_tx_ring_wrap(bp->tx_head);
+ bp->tx_head++;
+ netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- bp->tx_skb[entry].skb = skb;
- bp->tx_skb[entry].mapping = mapping;
- netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+
+ tx_skb = &bp->tx_skb[entry];
+ tx_skb->skb = skb;
+ tx_skb->mapping = mapping;
+ netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
skb->data, (unsigned long)mapping);
ctrl = MACB_BF(TX_FRMLEN, len);
@@ -665,18 +844,18 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (entry == (TX_RING_SIZE - 1))
ctrl |= MACB_BIT(TX_WRAP);
- bp->tx_ring[entry].addr = mapping;
- bp->tx_ring[entry].ctrl = ctrl;
- wmb();
+ desc = &bp->tx_ring[entry];
+ desc->addr = mapping;
+ desc->ctrl = ctrl;
- entry = NEXT_TX(entry);
- bp->tx_head = entry;
+ /* Make newly initialized descriptor visible to hardware */
+ wmb();
skb_tx_timestamp(skb);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- if (TX_BUFFS_AVAIL(bp) < 1)
+ if (macb_tx_ring_avail(bp) < 1)
netif_stop_queue(dev);
spin_unlock_irqrestore(&bp->lock, flags);
@@ -712,7 +891,7 @@ static int macb_alloc_consistent(struct macb *bp)
{
int size;
- size = TX_RING_SIZE * sizeof(struct ring_info);
+ size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
bp->tx_skb = kmalloc(size, GFP_KERNEL);
if (!bp->tx_skb)
goto out_err;
@@ -775,9 +954,6 @@ static void macb_init_rings(struct macb *bp)
static void macb_reset_hw(struct macb *bp)
{
- /* Make sure we have the write buffer for ourselves */
- wmb();
-
/*
* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
@@ -788,11 +964,11 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
/* Clear all status flags */
- macb_writel(bp, TSR, ~0UL);
- macb_writel(bp, RSR, ~0UL);
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
/* Disable all interrupts */
- macb_writel(bp, IDR, ~0UL);
+ macb_writel(bp, IDR, -1);
macb_readl(bp, ISR);
}
@@ -879,9 +1055,10 @@ static void macb_init_hw(struct macb *bp)
u32 config;
macb_reset_hw(bp);
- __macb_set_hwaddr(bp);
+ macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
+ config |= MACB_BF(RBOF, NET_IP_ALIGN); /* make the received IP header word-aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -891,6 +1068,8 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(NBC); /* No BroadCast */
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ bp->speed = SPEED_10;
+ bp->duplex = DUPLEX_HALF;
macb_configure_dma(bp);
@@ -902,13 +1081,8 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
/* Enable interrupts */
- macb_writel(bp, IER, (MACB_BIT(RCOMP)
- | MACB_BIT(RXUBR)
- | MACB_BIT(ISR_TUND)
- | MACB_BIT(ISR_RLE)
- | MACB_BIT(TXERR)
- | MACB_BIT(TCOMP)
- | MACB_BIT(ISR_ROVR)
+ macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+ | MACB_TX_INT_FLAGS
| MACB_BIT(HRESP)));
}
@@ -996,7 +1170,7 @@ static void macb_sethashtable(struct net_device *dev)
/*
* Enable/Disable promiscuous and multicast modes.
*/
-static void macb_set_rx_mode(struct net_device *dev)
+void macb_set_rx_mode(struct net_device *dev)
{
unsigned long cfg;
struct macb *bp = netdev_priv(dev);
@@ -1028,6 +1202,7 @@ static void macb_set_rx_mode(struct net_device *dev)
macb_writel(bp, NCFGR, cfg);
}
+EXPORT_SYMBOL_GPL(macb_set_rx_mode);
static int macb_open(struct net_device *dev)
{
@@ -1135,7 +1310,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
return nstat;
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct net_device_stats *nstat = &bp->stats;
@@ -1181,6 +1356,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return nstat;
}
+EXPORT_SYMBOL_GPL(macb_get_stats);
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -1204,25 +1380,55 @@ static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return phy_ethtool_sset(phydev, cmd);
}
-static void macb_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+static int macb_get_regs_len(struct net_device *netdev)
+{
+ return MACB_GREGS_NBR * sizeof(u32);
+}
+
+static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
{
struct macb *bp = netdev_priv(dev);
+ unsigned int tail, head;
+ u32 *regs_buff = p;
+
+ regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
+ | MACB_GREGS_VERSION;
+
+ tail = macb_tx_ring_wrap(bp->tx_tail);
+ head = macb_tx_ring_wrap(bp->tx_head);
+
+ regs_buff[0] = macb_readl(bp, NCR);
+ regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
+ regs_buff[2] = macb_readl(bp, NSR);
+ regs_buff[3] = macb_readl(bp, TSR);
+ regs_buff[4] = macb_readl(bp, RBQP);
+ regs_buff[5] = macb_readl(bp, TBQP);
+ regs_buff[6] = macb_readl(bp, RSR);
+ regs_buff[7] = macb_readl(bp, IMR);
- strcpy(info->driver, bp->pdev->dev.driver->name);
- strcpy(info->version, "$Revision: 1.14 $");
- strcpy(info->bus_info, dev_name(&bp->pdev->dev));
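+ /* entries 8-11 are not registers: TX ring tail/head indices and the bus addresses of the corresponding descriptors */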
+ regs_buff[8] = tail;
+ regs_buff[9] = head;
+ regs_buff[10] = macb_tx_dma(bp, tail);
+ regs_buff[11] = macb_tx_dma(bp, head);
+
+ if (macb_is_gem(bp)) {
+ regs_buff[12] = gem_readl(bp, USRIO);
+ regs_buff[13] = gem_readl(bp, DMACFG);
+ }
}
-static const struct ethtool_ops macb_ethtool_ops = {
+const struct ethtool_ops macb_ethtool_ops = {
.get_settings = macb_get_settings,
.set_settings = macb_set_settings,
- .get_drvinfo = macb_get_drvinfo,
+ .get_regs_len = macb_get_regs_len,
+ .get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
};
+EXPORT_SYMBOL_GPL(macb_ethtool_ops);
-static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev = bp->phy_dev;
@@ -1235,6 +1441,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, rq, cmd);
}
+EXPORT_SYMBOL_GPL(macb_ioctl);
static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
@@ -1306,6 +1513,7 @@ static int __init macb_probe(struct platform_device *pdev)
struct phy_device *phydev;
u32 config;
int err = -ENXIO;
+ struct pinctrl *pinctrl;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1313,6 +1521,15 @@ static int __init macb_probe(struct platform_device *pdev)
goto err_out;
}
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ err = PTR_ERR(pinctrl);
+ if (err == -EPROBE_DEFER)
+ goto err_out;
+
+ dev_warn(&pdev->dev, "No pinctrl provided\n");
+ }
+
err = -ENOMEM;
dev = alloc_etherdev(sizeof(*bp));
if (!dev)
@@ -1328,6 +1545,7 @@ static int __init macb_probe(struct platform_device *pdev)
bp->dev = dev;
spin_lock_init(&bp->lock);
+ INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
bp->pclk = clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
@@ -1384,7 +1602,9 @@ static int __init macb_probe(struct platform_device *pdev)
bp->phy_interface = err;
}
- if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+ else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
MACB_BIT(CLKEN)));
@@ -1398,8 +1618,6 @@ static int __init macb_probe(struct platform_device *pdev)
macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif
- bp->tx_pending = DEF_TX_RING_PENDING;
-
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 335e288..4414421 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -10,10 +10,15 @@
#ifndef _MACB_H
#define _MACB_H
+#define MACB_GREGS_NBR 16
+#define MACB_GREGS_VERSION 1
+
/* MACB register offsets */
#define MACB_NCR 0x0000
#define MACB_NCFGR 0x0004
#define MACB_NSR 0x0008
+#define MACB_TAR 0x000c /* AT91RM9200 only */
+#define MACB_TCR 0x0010 /* AT91RM9200 only */
#define MACB_TSR 0x0014
#define MACB_RBQP 0x0018
#define MACB_TBQP 0x001c
@@ -69,6 +74,12 @@
#define GEM_HRT 0x0084
#define GEM_SA1B 0x0088
#define GEM_SA1T 0x008C
+#define GEM_SA2B 0x0090
+#define GEM_SA2T 0x0094
+#define GEM_SA3B 0x0098
+#define GEM_SA3T 0x009C
+#define GEM_SA4B 0x00A0
+#define GEM_SA4T 0x00A4
#define GEM_OTX 0x0100
#define GEM_DCFG1 0x0280
#define GEM_DCFG2 0x0284
@@ -133,6 +144,8 @@
#define MACB_RTY_SIZE 1
#define MACB_PAE_OFFSET 13
#define MACB_PAE_SIZE 1
+#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
+#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
#define MACB_RBOF_OFFSET 14
#define MACB_RBOF_SIZE 2
#define MACB_RLCE_OFFSET 16
@@ -145,6 +158,8 @@
#define MACB_IRXFCS_SIZE 1
/* GEM specific NCFGR bitfields. */
+#define GEM_GBE_OFFSET 10
+#define GEM_GBE_SIZE 1
#define GEM_CLK_OFFSET 18
#define GEM_CLK_SIZE 3
#define GEM_DBW_OFFSET 21
@@ -178,6 +193,8 @@
#define MACB_TGO_SIZE 1
#define MACB_BEX_OFFSET 4
#define MACB_BEX_SIZE 1
+#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
+#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
#define MACB_COMP_OFFSET 5
#define MACB_COMP_SIZE 1
#define MACB_UND_OFFSET 6
@@ -246,6 +263,8 @@
/* Bitfields in USRIO (AT91) */
#define MACB_RMII_OFFSET 0
#define MACB_RMII_SIZE 1
+#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
+#define GEM_RGMII_SIZE 1
#define MACB_CLKEN_OFFSET 1
#define MACB_CLKEN_SIZE 1
@@ -352,7 +371,12 @@
__v; \
})
-struct dma_desc {
+/**
+ * struct macb_dma_desc - Hardware DMA descriptor
+ * @addr: DMA address of data buffer
+ * @ctrl: Control and status bits
+ */
+struct macb_dma_desc {
u32 addr;
u32 ctrl;
};
@@ -417,7 +441,12 @@ struct dma_desc {
#define MACB_TX_USED_OFFSET 31
#define MACB_TX_USED_SIZE 1
-struct ring_info {
+/**
+ * struct macb_tx_skb - data about an skb which is being transmitted
+ * @skb: skb currently being transmitted
+ * @mapping: DMA address of the skb's data buffer
+ */
+struct macb_tx_skb {
struct sk_buff *skb;
dma_addr_t mapping;
};
@@ -502,12 +531,12 @@ struct macb {
void __iomem *regs;
unsigned int rx_tail;
- struct dma_desc *rx_ring;
+ struct macb_dma_desc *rx_ring;
void *rx_buffers;
unsigned int tx_head, tx_tail;
- struct dma_desc *tx_ring;
- struct ring_info *tx_skb;
+ struct macb_dma_desc *tx_ring;
+ struct macb_tx_skb *tx_skb;
spinlock_t lock;
struct platform_device *pdev;
@@ -515,6 +544,7 @@ struct macb {
struct clk *hclk;
struct net_device *dev;
struct napi_struct napi;
+ struct work_struct tx_error_task;
struct net_device_stats stats;
union {
struct macb_stats macb;
@@ -525,8 +555,6 @@ struct macb {
dma_addr_t tx_ring_dma;
dma_addr_t rx_buffers_dma;
- unsigned int rx_pending, tx_pending;
-
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
unsigned int link;
@@ -534,8 +562,22 @@ struct macb {
unsigned int duplex;
phy_interface_t phy_interface;
+
+ /* AT91RM9200 transmit */
+ struct sk_buff *skb; /* holds skb until xmit interrupt completes */
+ dma_addr_t skb_physaddr; /* phys addr from dma_map_single */
+ int skb_length; /* saved skb length for dma_unmap_single */
};
+extern const struct ethtool_ops macb_ethtool_ops;
+
+int macb_mii_init(struct macb *bp);
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+struct net_device_stats *macb_get_stats(struct net_device *dev);
+void macb_set_rx_mode(struct net_device *dev);
+void macb_set_hwaddr(struct macb *bp);
+void macb_get_hwaddr(struct macb *bp);
+
static inline bool macb_is_gem(struct macb *bp)
{
return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 16814b3..b407043 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -191,6 +191,7 @@
#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
+#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */
/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
@@ -210,7 +211,7 @@
#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TUE)
+ DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
@@ -373,6 +374,7 @@ struct xgmac_priv {
struct sk_buff **tx_skbuff;
unsigned int tx_head;
unsigned int tx_tail;
+ int tx_irq_cnt;
void __iomem *base;
unsigned int dma_buf_sz;
@@ -663,6 +665,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
{
struct xgmac_dma_desc *p;
dma_addr_t paddr;
+ int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
int entry = priv->rx_head;
@@ -671,13 +674,13 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
if (priv->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
if (unlikely(skb == NULL))
break;
priv->rx_skbuff[entry] = skb;
paddr = dma_map_single(priv->device, skb->data,
- priv->dma_buf_sz, DMA_FROM_DEVICE);
+ bufsz, DMA_FROM_DEVICE);
desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
}
@@ -701,10 +704,10 @@ static int xgmac_dma_desc_rings_init(struct net_device *dev)
unsigned int bfsize;
/* Set the Buffer size according to the MTU;
- * indeed, in case of jumbo we need to bump-up the buffer sizes.
+ * The total buffer size including any IP offset must be a multiple
+ * of 8 bytes.
*/
- bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
- 64);
+ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
@@ -845,9 +848,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
int i;
- void __iomem *ioaddr = priv->base;
-
- writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
unsigned int entry = priv->tx_tail;
@@ -888,7 +888,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
}
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
- TX_THRESH)
+ MAX_SKB_FRAGS)
netif_wake_queue(priv->dev);
}
@@ -965,8 +965,7 @@ static int xgmac_hw_init(struct net_device *dev)
ctrl |= XGMAC_CONTROL_IPC;
writel(ctrl, ioaddr + XGMAC_CONTROL);
- value = DMA_CONTROL_DFF;
- writel(value, ioaddr + XGMAC_DMA_CONTROL);
+ writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
/* Set the HW DMA mode and the COE */
writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
@@ -1060,19 +1059,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct xgmac_priv *priv = netdev_priv(dev);
unsigned int entry;
int i;
+ u32 irq_flag;
int nfrags = skb_shinfo(skb)->nr_frags;
struct xgmac_dma_desc *desc, *first;
unsigned int desc_flags;
unsigned int len;
dma_addr_t paddr;
- if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
- (nfrags + 1)) {
- writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
- priv->base + XGMAC_DMA_INTR_ENA);
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
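+ /* Request a TX-complete interrupt only once every DMA_TX_RING_SZ/4 frames to limit interrupt load */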
+ priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
+ irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
TXDESC_CSUM_ALL : 0;
@@ -1113,9 +1108,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Interrupt on completition only for the latest segment */
if (desc != first)
desc_set_tx_owner(desc, desc_flags |
- TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+ TXDESC_LAST_SEG | irq_flag);
else
- desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+ desc_flags |= TXDESC_LAST_SEG | irq_flag;
/* Set owner on first desc last to avoid race condition */
wmb();
@@ -1124,6 +1119,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
writel(1, priv->base + XGMAC_DMA_TX_POLL);
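+ /* Stop the queue once there is no longer room for a fully fragmented skb; it is woken again from xgmac_tx_complete() */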
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+ MAX_SKB_FRAGS)
+ netif_stop_queue(dev);
return NETDEV_TX_OK;
}
@@ -1139,9 +1137,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
struct sk_buff *skb;
int frame_len;
- writel(DMA_STATUS_RI | DMA_STATUS_NIS,
- priv->base + XGMAC_DMA_STATUS);
-
entry = priv->rx_tail;
p = priv->dma_rx + entry;
if (desc_get_owner(p))
@@ -1180,8 +1175,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
xgmac_rx_refill(priv);
- writel(1, priv->base + XGMAC_DMA_RX_POLL);
-
return count;
}
@@ -1205,7 +1198,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
- writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+ __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}
return work_done;
}
@@ -1350,7 +1343,7 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
struct xgmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
- intr_status = readl(ioaddr + XGMAC_INT_STAT);
+ intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
if (intr_status & XGMAC_INT_STAT_PMT) {
netdev_dbg(priv->dev, "received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT */
@@ -1368,9 +1361,9 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
struct xgmac_extra_stats *x = &priv->xstats;
/* read the status register (CSR5) */
- intr_status = readl(priv->base + XGMAC_DMA_STATUS);
- intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
- writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+ intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
+ intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
+ __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
/* It displays the DMA process states (CSR5 register) */
/* ABNORMAL interrupts */
@@ -1405,8 +1398,8 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
}
/* TX/RX NORMAL interrupts */
- if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
- writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+ if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
+ __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
napi_schedule(&priv->napi);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index df01b63..8c82248 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -42,10 +42,9 @@
#include <linux/mdio.h>
#include "version.h"
-#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_ALERT(adap, fmt, ...) \
- dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)
/*
* More powerful macro that selectively prints messages based on msg_enable.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a059f0c..2fb01bf 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1758,21 +1758,7 @@ static struct pci_driver rio_driver = {
.remove = __devexit_p(rio_remove1),
};
-static int __init
-rio_init (void)
-{
- return pci_register_driver(&rio_driver);
-}
-
-static void __exit
-rio_exit (void)
-{
- pci_unregister_driver (&rio_driver);
-}
-
-module_init (rio_init);
-module_exit (rio_exit);
-
+module_pci_driver(rio_driver);
/*
Compile command:
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf4c05b..abf26c7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.4.31.0u"
+#define DRV_VER "4.4.161.0u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -53,6 +53,7 @@
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
+#define OC_DEVICE_ID6 0x728 /* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1 0xE602
#define OC_SUBSYS_DEVICE_ID2 0xE642
#define OC_SUBSYS_DEVICE_ID3 0xE612
@@ -71,6 +72,7 @@ static inline char *nic_name(struct pci_dev *pdev)
case BE_DEVICE_ID2:
return BE3_NAME;
case OC_DEVICE_ID5:
+ case OC_DEVICE_ID6:
return OC_NAME_SH;
default:
return BE_NAME;
@@ -346,7 +348,6 @@ struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
- u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
@@ -374,11 +375,8 @@ struct be_adapter {
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */
- u8 eq_next_idx;
struct be_drv_stats drv_stats;
-
u16 vlans_added;
- u16 max_vlans; /* Number of vlans supported */
u8 vlan_tag[VLAN_N_VID];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
@@ -391,6 +389,7 @@ struct be_adapter {
struct delayed_work func_recovery_work;
u32 flags;
+ u32 cmd_privileges;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
int if_handle; /* Used to configure filtering */
@@ -408,10 +407,8 @@ struct be_adapter {
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool stats_cmd_sent;
- u8 generation; /* BladeEngine ASIC generation */
u32 if_type;
struct {
- u8 __iomem *base; /* Door Bell */
u32 size;
u32 total_size;
u64 io_addr;
@@ -434,10 +431,18 @@ struct be_adapter {
struct phy_info phy;
u8 wol_cap;
bool wol;
- u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
u32 uc_macs; /* Count of secondary UC MAC programmed */
u32 msg_enable;
int be_get_temp_freq;
+ u16 max_mcast_mac;
+ u16 max_tx_queues;
+ u16 max_rss_queues;
+ u16 max_rx_queues;
+ u16 max_pmac_cnt;
+ u16 max_vlans;
+ u16 max_event_queues;
+ u32 if_cap_flags;
+ u8 pf_number;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -448,21 +453,25 @@ struct be_adapter {
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
i++, vf_cfg++)
-/* BladeEngine Generation numbers */
-#define BE_GEN2 2
-#define BE_GEN3 3
-
#define ON 1
#define OFF 0
-#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
- (adapter->pdev->device == OC_DEVICE_ID4))
-#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5)
+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
+ adapter->pdev->device == OC_DEVICE_ID4)
+
+#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
+ adapter->pdev->device == OC_DEVICE_ID6)
+
+#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
+ adapter->pdev->device == OC_DEVICE_ID2)
+#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
+ adapter->pdev->device == OC_DEVICE_ID1)
-#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
- adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
- (adapter->function_mode & RDMA_ENABLED))
+#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
+
+#define be_roce_supported(adapter) (skyhawk_chip(adapter) && \
+ (adapter->function_mode & RDMA_ENABLED))
extern const struct ethtool_ops be_ethtool_ops;
@@ -637,12 +646,6 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter)
}
}
-static inline bool be_type_2_3(struct be_adapter *adapter)
-{
- return (adapter->if_type == SLI_INTF_TYPE_2 ||
- adapter->if_type == SLI_INTF_TYPE_3) ? true : false;
-}
-
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index af60bb2..f2875aa 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,6 +19,55 @@
#include "be.h"
#include "be_cmds.h"
+static struct be_cmd_priv_map cmd_priv_map[] = {
+ {
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+ CMD_SUBSYSTEM_ETH,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_GET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_SET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_ETH_GET_PPORT_STATS,
+ CMD_SUBSYSTEM_ETH,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_GET_PHY_DETAILS,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ }
+};
+
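+/*
+ * A command listed in cmd_priv_map may only be issued by a function that
+ * holds at least one of the privileges in its mask; unlisted commands are
+ * always allowed.
+ */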
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
+ u8 subsystem)
+{
+ int i;
+ int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+ u32 cmd_privileges = adapter->cmd_privileges;
+
+ for (i = 0; i < num_entries; i++)
+ if (opcode == cmd_priv_map[i].opcode &&
+ subsystem == cmd_priv_map[i].subsystem)
+ if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
+ return false;
+
+ return true;
+}
+
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
return wrb->payload.embedded_payload;
@@ -419,14 +468,13 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
u32 sem;
+ u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
+ SLIPORT_SEMAPHORE_OFFSET_BE;
- if (lancer_chip(adapter))
- sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
- else
- sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+ pci_read_config_dword(adapter->pdev, reg, &sem);
+ *stage = sem & POST_STAGE_MASK;
- *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
- if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
+ if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
return -1;
else
return 0;
@@ -452,10 +500,33 @@ int lancer_wait_ready(struct be_adapter *adapter)
return status;
}
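+/*
+ * Check the SLIPORT status/error registers for the "no resources
+ * provisioned" error signature reported by Lancer firmware.
+ */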
+static bool lancer_provisioning_error(struct be_adapter *adapter)
+{
+ u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ sliport_err1 = ioread32(adapter->db +
+ SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db +
+ SLIPORT_ERROR2_OFFSET);
+
+ if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
+ sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
+ return true;
+ }
+ return false;
+}
+
int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
int status;
u32 sliport_status, err, reset_needed;
+ bool resource_error;
+
+ resource_error = lancer_provisioning_error(adapter);
+ if (resource_error)
+ return -1;
+
status = lancer_wait_ready(adapter);
if (!status) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -477,6 +548,14 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
status = -1;
}
}
+ /* Stop error recovery if error is not recoverable.
+ * A no-resource error is temporary and will go away once
+ * the PF provisions resources.
+ */
+ resource_error = lancer_provisioning_error(adapter);
+ if (status == -1 && !resource_error)
+ adapter->eeh_error = true;
+
return status;
}
@@ -601,6 +680,9 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_mcc_wrb *wrb;
+ if (!mccq->created)
+ return NULL;
+
if (atomic_read(&mccq->used) >= mccq->len) {
dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
return NULL;
@@ -1155,8 +1237,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter);
- if (!status)
- q->created = false;
+ q->created = false;
mutex_unlock(&adapter->mbox_lock);
return status;
@@ -1183,8 +1264,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
req->id = cpu_to_le16(q->id);
status = be_mcc_notify_wait(adapter);
- if (!status)
- q->created = false;
+ q->created = false;
err:
spin_unlock_bh(&adapter->mcc_lock);
@@ -1281,7 +1361,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
- if (adapter->generation == BE_GEN3)
+ /* version 1 of the cmd is supported on all chips except BE2 */
+ if (!BE2_chip(adapter))
hdr->version = 1;
be_mcc_notify(adapter);
@@ -1301,6 +1382,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
struct lancer_cmd_req_pport_stats *req;
int status = 0;
+ if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
+ CMD_SUBSYSTEM_ETH))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1367,7 +1452,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
- if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+ /* version 1 of the cmd is supported on all chips except BE2 */
+ if (!BE2_chip(adapter))
req->hdr.version = 1;
req->hdr.domain = dom;
@@ -1658,9 +1744,9 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
*/
- if (!lancer_chip(adapter) || be_physfn(adapter))
- req->if_flags_mask |=
- cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags_mask |=
+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
+ adapter->if_cap_flags);
req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
netdev_for_each_mc_addr(ha, adapter->netdev)
@@ -1680,6 +1766,10 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
struct be_cmd_req_set_flow_control *req;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1709,6 +1799,10 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
struct be_cmd_req_get_flow_control *req;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -2067,7 +2161,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
int offset)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_write_flashrom *req;
+ struct be_cmd_read_flash_crc *req;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2080,7 +2174,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
+ wrb, NULL);
req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
@@ -2089,7 +2184,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
status = be_mcc_notify_wait(adapter);
if (!status)
- memcpy(flashed_crc, req->params.data_buf, 4);
+ memcpy(flashed_crc, req->crc, 4);
err:
spin_unlock_bh(&adapter->mcc_lock);
@@ -2275,6 +2370,10 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
struct be_dma_mem cmd;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -2434,6 +2533,42 @@ err:
return status;
}
+/* Get privilege(s) for a function */
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fn_privileges *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fn_privileges *resp =
+ embedded_payload(wrb);
+ *privilege = le32_to_cpu(resp->privilege_mask);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
/* Uses synchronous MCCQ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
bool *pmac_id_active, u32 *pmac_id, u8 domain)
@@ -2651,6 +2786,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
int payload_len = sizeof(*req);
struct be_dma_mem cmd;
+ if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+ CMD_SUBSYSTEM_ETH))
+ return -EPERM;
+
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@@ -2792,6 +2931,240 @@ err:
return status;
}
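+/*
+ * Walk the list of resource descriptors returned by firmware and return the
+ * NIC descriptor, or NULL if none is found within the response buffer.
+ */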
+static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
+ u32 max_buf_size)
+{
+ struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ desc->desc_len = RESOURCE_DESC_SIZE;
+ if (((void *)desc + desc->desc_len) >
+ (void *)(buf + max_buf_size)) {
+ desc = NULL;
+ break;
+ }
+
+ if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
+ break;
+
+ desc = (void *)desc + desc->desc_len;
+ }
+
+ if (!desc || i == MAX_RESOURCE_DESC)
+ return NULL;
+
+ return desc;
+}
+
+/* Uses Mbox */
+int be_cmd_get_func_config(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_func_config *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_get_func_config);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FUNC_CONFIG,
+ cmd.size, wrb, &cmd);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_func_config *resp = cmd.va;
+ u32 desc_count = le32_to_cpu(resp->desc_count);
+ struct be_nic_resource_desc *desc;
+
+ desc = be_get_nic_desc(resp->func_param, desc_count,
+ sizeof(resp->func_param));
+ if (!desc) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ adapter->pf_number = desc->pf_num;
+ adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
+ adapter->max_vlans = le16_to_cpu(desc->vlan_count);
+ adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
+ adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
+ adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
+ adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
+
+ adapter->max_event_queues = le16_to_cpu(desc->eq_count);
+ adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ pci_free_consistent(adapter->pdev, cmd.size,
+ cmd.va, cmd.dma);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
+ u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_profile_config *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PROFILE_CONFIG,
+ cmd.size, wrb, &cmd);
+
+ req->type = ACTIVE_PROFILE_TYPE;
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_profile_config *resp = cmd.va;
+ u32 desc_count = le32_to_cpu(resp->desc_count);
+ struct be_nic_resource_desc *desc;
+
+ desc = be_get_nic_desc(resp->func_param, desc_count,
+ sizeof(resp->func_param));
+
+ if (!desc) {
+ status = -EINVAL;
+ goto err;
+ }
+ *cap_flags = le32_to_cpu(desc->cap_flags);
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ pci_free_consistent(adapter->pdev, cmd.size,
+ cmd.va, cmd.dma);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
+ u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_profile_config *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+ req->desc_count = cpu_to_le32(1);
+
+ req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
+ req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
+ req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
+ req->nic_desc.pf_num = adapter->pf_number;
+ req->nic_desc.vf_num = domain;
+
+ /* Mark fields invalid */
+ req->nic_desc.unicast_mac_count = 0xFFFF;
+ req->nic_desc.mcc_count = 0xFFFF;
+ req->nic_desc.vlan_count = 0xFFFF;
+ req->nic_desc.mcast_mac_count = 0xFFFF;
+ req->nic_desc.txq_count = 0xFFFF;
+ req->nic_desc.rq_count = 0xFFFF;
+ req->nic_desc.rssq_count = 0xFFFF;
+ req->nic_desc.lro_count = 0xFFFF;
+ req->nic_desc.cq_count = 0xFFFF;
+ req->nic_desc.toe_conn_count = 0xFFFF;
+ req->nic_desc.eq_count = 0xFFFF;
+ req->nic_desc.link_param = 0xFF;
+ req->nic_desc.bw_min = 0xFFFFFFFF;
+ req->nic_desc.acpi_params = 0xFF;
+ req->nic_desc.wol_param = 0x0F;
+
+ /* Change BW */
+ req->nic_desc.bw_min = cpu_to_le32(bps);
+ req->nic_desc.bw_max = cpu_to_le32(bps);
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_enable_disable_vf *req;
+ int status;
+
+ if (!lancer_chip(adapter))
+ return 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+ req->enable = 1;
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0936e21..d6552e1 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -196,9 +196,14 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_MAC_LIST 147
#define OPCODE_COMMON_SET_MAC_LIST 148
#define OPCODE_COMMON_GET_HSW_CONFIG 152
+#define OPCODE_COMMON_GET_FUNC_CONFIG 160
+#define OPCODE_COMMON_GET_PROFILE_CONFIG 164
+#define OPCODE_COMMON_SET_PROFILE_CONFIG 165
#define OPCODE_COMMON_SET_HSW_CONFIG 153
+#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
+#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
#define OPCODE_ETH_RSS_CONFIG 1
#define OPCODE_ETH_ACPI_CONFIG 2
@@ -1151,14 +1156,22 @@ struct flashrom_params {
u32 op_type;
u32 data_buf_size;
u32 offset;
- u8 data_buf[4];
};
struct be_cmd_write_flashrom {
struct be_cmd_req_hdr hdr;
struct flashrom_params params;
-};
+ u8 data_buf[32768];
+ u8 rsvd[4];
+} __packed;
+/* cmd to read flash crc */
+struct be_cmd_read_flash_crc {
+ struct be_cmd_req_hdr hdr;
+ struct flashrom_params params;
+ u8 crc[4];
+ u8 rsvd[4];
+};
/**************** Lancer Firmware Flash ************/
struct amap_lancer_write_obj_context {
u8 write_length[24];
@@ -1429,6 +1442,41 @@ struct be_cmd_resp_set_func_cap {
u8 rsvd[212];
};
+/*********************** Function Privileges ***********************/
+enum {
+ BE_PRIV_DEFAULT = 0x1,
+ BE_PRIV_LNKQUERY = 0x2,
+ BE_PRIV_LNKSTATS = 0x4,
+ BE_PRIV_LNKMGMT = 0x8,
+ BE_PRIV_LNKDIAG = 0x10,
+ BE_PRIV_UTILQUERY = 0x20,
+ BE_PRIV_FILTMGMT = 0x40,
+ BE_PRIV_IFACEMGMT = 0x80,
+ BE_PRIV_VHADM = 0x100,
+ BE_PRIV_DEVCFG = 0x200,
+ BE_PRIV_DEVSEC = 0x400
+};
+#define MAX_PRIVILEGES (BE_PRIV_VHADM | BE_PRIV_DEVCFG | \
+ BE_PRIV_DEVSEC)
+#define MIN_PRIVILEGES BE_PRIV_DEFAULT
+
+struct be_cmd_priv_map {
+ u8 opcode;
+ u8 subsystem;
+ u32 priv_mask;
+};
+
+struct be_cmd_req_get_fn_privileges {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+struct be_cmd_resp_get_fn_privileges {
+ struct be_cmd_resp_hdr hdr;
+ u32 privilege_mask;
+};
+
/******************** GET/SET_MACLIST **************************/
#define BE_MAX_MAC 64
struct be_cmd_req_get_mac_list {
@@ -1608,33 +1656,6 @@ struct be_cmd_resp_get_stats_v1 {
struct be_hw_stats_v1 hw_stats;
};
-static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
-
- return &cmd->hw_stats;
- } else {
- struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
-
- return &cmd->hw_stats;
- }
-}
-
-static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->erx;
- } else {
- struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->erx;
- }
-}
-
-
/************** get fat capabilites *******************/
#define MAX_MODULES 27
#define MAX_MODES 4
@@ -1684,6 +1705,96 @@ struct be_cmd_req_set_ext_fat_caps {
struct be_fat_conf_params set_params;
};
+#define RESOURCE_DESC_SIZE 72
+#define NIC_RESOURCE_DESC_TYPE_ID 0x41
+#define MAX_RESOURCE_DESC 4
+
+/* QOS unit number */
+#define QUN 4
+/* Immediate */
+#define IMM 6
+/* No save */
+#define NOSV 7
+
+struct be_nic_resource_desc {
+ u8 desc_type;
+ u8 desc_len;
+ u8 rsvd1;
+ u8 flags;
+ u8 vf_num;
+ u8 rsvd2;
+ u8 pf_num;
+ u8 rsvd3;
+ u16 unicast_mac_count;
+ u8 rsvd4[6];
+ u16 mcc_count;
+ u16 vlan_count;
+ u16 mcast_mac_count;
+ u16 txq_count;
+ u16 rq_count;
+ u16 rssq_count;
+ u16 lro_count;
+ u16 cq_count;
+ u16 toe_conn_count;
+ u16 eq_count;
+ u32 rsvd5;
+ u32 cap_flags;
+ u8 link_param;
+ u8 rsvd6[3];
+ u32 bw_min;
+ u32 bw_max;
+ u8 acpi_params;
+ u8 wol_param;
+ u16 rsvd7;
+ u32 rsvd8[3];
+};
+
+struct be_cmd_req_get_func_config {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_get_func_config {
+ struct be_cmd_req_hdr hdr;
+ u32 desc_count;
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+};
+
+#define ACTIVE_PROFILE_TYPE 0x2
+struct be_cmd_req_get_profile_config {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd;
+ u8 type;
+ u16 rsvd1;
+};
+
+struct be_cmd_resp_get_profile_config {
+ struct be_cmd_req_hdr hdr;
+ u32 desc_count;
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+};
+
+struct be_cmd_req_set_profile_config {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+ u32 desc_count;
+ struct be_nic_resource_desc nic_desc;
+};
+
+struct be_cmd_resp_set_profile_config {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_enable_disable_vf {
+ struct be_cmd_req_hdr hdr;
+ u8 enable;
+ u8 rsvd[3];
+};
+
+static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
+{
+ return flags & adapter->cmd_privileges ? true : false;
+}
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1780,6 +1891,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
+ u32 *privilege, u32 domain);
extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
bool *pmac_id_active, u32 *pmac_id,
u8 domain);
@@ -1798,4 +1911,10 @@ extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
extern int lancer_wait_ready(struct be_adapter *adapter);
extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+extern int be_cmd_get_func_config(struct be_adapter *adapter);
+extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
+ u8 domain);
+extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
+ u8 domain);
+extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
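Editor's note on the new privilege helper: check_privilege() returns true when any bit of the supplied mask is set in adapter->cmd_privileges, so testing against the composite MAX_PRIVILEGES (VHADM | DEVCFG | DEVSEC) asks whether the function holds at least one management privilege. A hedged sketch of the gating idiom (the real callers added in be_ethtool.c below use exactly this test; example_dump_regs() is hypothetical):

	static int example_dump_regs(struct be_adapter *adapter, void *buf, u32 buf_len)
	{
		/* Unprivileged (e.g. VF) functions report nothing rather than fail */
		if (!check_privilege(adapter, MAX_PRIVILEGES))
			return 0;

		be_cmd_get_regs(adapter, buf_len, buf);	/* declared above */
		return 0;
	}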
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 8e6fb0b..00454a1 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -261,6 +261,9 @@ be_get_reg_len(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return 0;
+
if (be_physfn(adapter)) {
if (lancer_chip(adapter))
log_size = lancer_cmd_get_file_len(adapter,
@@ -525,6 +528,10 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
u8 link_status;
u16 link_speed = 0;
int status;
+ u32 auto_speeds;
+ u32 fixed_speeds;
+ u32 dac_cable_len;
+ u16 interface_type;
if (adapter->phy.link_speed < 0) {
status = be_cmd_link_status_query(adapter, &link_speed,
@@ -534,39 +541,46 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, link_speed);
status = be_cmd_get_phy_info(adapter);
- if (status)
- return status;
-
- ecmd->supported =
- convert_to_et_setting(adapter->phy.interface_type,
- adapter->phy.auto_speeds_supported |
- adapter->phy.fixed_speeds_supported);
- ecmd->advertising =
- convert_to_et_setting(adapter->phy.interface_type,
- adapter->phy.auto_speeds_supported);
-
- ecmd->port = be_get_port_type(adapter->phy.interface_type,
- adapter->phy.dac_cable_len);
-
- if (adapter->phy.auto_speeds_supported) {
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->advertising |= ADVERTISED_Autoneg;
- }
+ if (!status) {
+ interface_type = adapter->phy.interface_type;
+ auto_speeds = adapter->phy.auto_speeds_supported;
+ fixed_speeds = adapter->phy.fixed_speeds_supported;
+ dac_cable_len = adapter->phy.dac_cable_len;
+
+ ecmd->supported =
+ convert_to_et_setting(interface_type,
+ auto_speeds |
+ fixed_speeds);
+ ecmd->advertising =
+ convert_to_et_setting(interface_type,
+ auto_speeds);
+
+ ecmd->port = be_get_port_type(interface_type,
+ dac_cable_len);
+
+ if (adapter->phy.auto_speeds_supported) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ }
- if (be_pause_supported(adapter)) {
ecmd->supported |= SUPPORTED_Pause;
- ecmd->advertising |= ADVERTISED_Pause;
- }
-
- switch (adapter->phy.interface_type) {
- case PHY_TYPE_KR_10GB:
- case PHY_TYPE_KX4_10GB:
- ecmd->transceiver = XCVR_INTERNAL;
- break;
- default:
- ecmd->transceiver = XCVR_EXTERNAL;
- break;
+ if (be_pause_supported(adapter))
+ ecmd->advertising |= ADVERTISED_Pause;
+
+ switch (adapter->phy.interface_type) {
+ case PHY_TYPE_KR_10GB:
+ case PHY_TYPE_KX4_10GB:
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ default:
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+ } else {
+ ecmd->port = PORT_OTHER;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
}
/* Save for future use */
@@ -787,6 +801,10 @@ static int
be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return 0;
+
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_get_file_len(adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index b755f70..541d453 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -31,12 +31,12 @@
#define MPU_EP_CONTROL 0
-/********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 0xac
-#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
-#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK 0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+/********** MPU semaphore: used for SH & BE *************/
+#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
+#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
+#define POST_STAGE_MASK 0x0000FFFF
+#define POST_ERR_MASK 0x1
+#define POST_ERR_SHIFT 31
/* MPU semphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
@@ -59,6 +59,9 @@
#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
#define PHYSDEV_CONTROL_INP_MASK 0x40000000
+#define SLIPORT_ERROR_NO_RESOURCE1 0x2
+#define SLIPORT_ERROR_NO_RESOURCE2 0x9
+
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -102,11 +105,6 @@
#define SLI_INTF_TYPE_2 2
#define SLI_INTF_TYPE_3 3
-/* SLI family */
-#define BE_SLI_FAMILY 0x0
-#define LANCER_A0_SLI_FAMILY 0xA
-#define SKYHAWK_SLI_FAMILY 0x2
-
/********* ISR0 Register offset **********/
#define CEV_ISR0_OFFSET 0xC18
#define CEV_ISR_SIZE 4
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d1b6cc5..c365722 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -44,6 +44,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,23 +238,46 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
int status = 0;
u8 current_mac[ETH_ALEN];
u32 pmac_id = adapter->pmac_id[0];
+ bool active_mac = true;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- status = be_cmd_mac_addr_query(adapter, current_mac, false,
- adapter->if_handle, 0);
+ /* For a BE VF the MAC address is already activated by the PF.
+ * Hence the only operation left is updating netdev->dev_addr.
+ * Update it only if the user passes the same MAC that was used
+ * while configuring the VF MAC from the PF (hypervisor).
+ */
+ if (!lancer_chip(adapter) && !be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, current_mac,
+ false, adapter->if_handle, 0);
+ if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
+ goto done;
+ else
+ goto err;
+ }
+
+ if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
+ goto done;
+
+ /* For Lancer check if any MAC is active.
+ * If active, get its mac id.
+ */
+ if (lancer_chip(adapter) && !be_physfn(adapter))
+ be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
+ &pmac_id, 0);
+
+ status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+ adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+
if (status)
goto err;
- if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id[0], 0);
- if (status)
- goto err;
-
- be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
- }
+ if (active_mac)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ pmac_id, 0);
+done:
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
return 0;
err:
@@ -261,7 +285,35 @@ err:
return status;
}
-static void populate_be2_stats(struct be_adapter *adapter)
+/* BE2 supports only v0 cmd */
+static void *hw_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (BE2_chip(adapter)) {
+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ } else {
+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ }
+}
+
+/* BE2 supports only v0 cmd */
+static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (BE2_chip(adapter)) {
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ } else {
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ }
+}
+
+static void populate_be_v0_stats(struct be_adapter *adapter)
{
struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -310,7 +362,7 @@ static void populate_be2_stats(struct be_adapter *adapter)
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
-static void populate_be3_stats(struct be_adapter *adapter)
+static void populate_be_v1_stats(struct be_adapter *adapter)
{
struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -412,28 +464,25 @@ void be_parse_stats(struct be_adapter *adapter)
struct be_rx_obj *rxo;
int i;
- if (adapter->generation == BE_GEN3) {
- if (lancer_chip(adapter))
- populate_lancer_stats(adapter);
- else
- populate_be3_stats(adapter);
+ if (lancer_chip(adapter)) {
+ populate_lancer_stats(adapter);
} else {
- populate_be2_stats(adapter);
- }
-
- if (lancer_chip(adapter))
- goto done;
+ if (BE2_chip(adapter))
+ populate_be_v0_stats(adapter);
+ else
+ /* for BE3 and Skyhawk */
+ populate_be_v1_stats(adapter);
- /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
- for_all_rx_queues(adapter, rxo, i) {
- /* below erx HW counter can actually wrap around after
- * 65535. Driver accumulates a 32-bit value
- */
- accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
- (u16)erx->rx_drops_no_fragments[rxo->q.id]);
+ /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+ for_all_rx_queues(adapter, rxo, i) {
+ /* below erx HW counter can actually wrap around after
+ * 65535. Driver accumulates a 32-bit value
+ */
+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+ (u16)erx->rx_drops_no_fragments \
+ [rxo->q.id]);
+ }
}
-done:
- return;
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
@@ -597,16 +646,6 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
hdr, skb_shinfo(skb)->gso_size);
if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
- if (lancer_chip(adapter) && adapter->sli_family ==
- LANCER_A0_SLI_FAMILY) {
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
- if (is_tcp_pkt(skb))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb,
- tcpcs, hdr, 1);
- else if (is_udp_pkt(skb))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb,
- udpcs, hdr, 1);
- }
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (is_tcp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -856,11 +895,15 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!be_physfn(adapter)) {
+ if (!lancer_chip(adapter) && !be_physfn(adapter)) {
status = -EINVAL;
goto ret;
}
+ /* Packets with VID 0 are always received by Lancer by default */
+ if (lancer_chip(adapter) && vid == 0)
+ goto ret;
+
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
status = be_vid_config(adapter);
@@ -878,11 +921,15 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!be_physfn(adapter)) {
+ if (!lancer_chip(adapter) && !be_physfn(adapter)) {
status = -EINVAL;
goto ret;
}
+ /* Packets with VID 0 are always received by Lancer by default */
+ if (lancer_chip(adapter) && vid == 0)
+ goto ret;
+
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
status = be_vid_config(adapter);
@@ -917,7 +964,7 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > BE_MAX_MC) {
+ netdev_mc_count(netdev) > adapter->max_mcast_mac) {
be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
goto done;
}
@@ -962,6 +1009,9 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
+ bool active_mac = false;
+ u32 pmac_id;
+ u8 old_mac[ETH_ALEN];
if (!sriov_enabled(adapter))
return -EPERM;
@@ -970,6 +1020,12 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return -EINVAL;
if (lancer_chip(adapter)) {
+ status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+ &pmac_id, vf + 1);
+ if (!status && active_mac)
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ pmac_id, vf + 1);
+
status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
} else {
status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
@@ -1062,7 +1118,10 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return -EINVAL;
}
- status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
+ if (lancer_chip(adapter))
+ status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
+ else
+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
@@ -1837,12 +1896,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
static int be_num_txqs_want(struct be_adapter *adapter)
{
- if (sriov_want(adapter) || be_is_mc(adapter) ||
- lancer_chip(adapter) || !be_physfn(adapter) ||
- adapter->generation == BE_GEN2)
+ if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
+ be_is_mc(adapter) ||
+ (!lancer_chip(adapter) && !be_physfn(adapter)) ||
+ BE2_chip(adapter))
return 1;
else
- return MAX_TX_QS;
+ return adapter->max_tx_queues;
}
static int be_tx_cqs_create(struct be_adapter *adapter)
@@ -2177,9 +2237,11 @@ static void be_msix_disable(struct be_adapter *adapter)
static uint be_num_rss_want(struct be_adapter *adapter)
{
u32 num = 0;
+
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !sriov_want(adapter) && be_physfn(adapter)) {
- num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ (lancer_chip(adapter) ||
+ (!sriov_want(adapter) && be_physfn(adapter)))) {
+ num = adapter->max_rss_queues;
num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
}
return num;
@@ -2579,10 +2641,30 @@ static int be_clear(struct be_adapter *adapter)
be_tx_queues_destroy(adapter);
be_evt_queues_destroy(adapter);
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
+
be_msix_disable(adapter);
return 0;
}
+static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
+ u32 *cap_flags, u8 domain)
+{
+ bool profile_present = false;
+ int status;
+
+ if (lancer_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, cap_flags, domain);
+ if (!status)
+ profile_present = true;
+ }
+
+ if (!profile_present)
+ *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+}
+
static int be_vf_setup_init(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
@@ -2634,9 +2716,13 @@ static int be_vf_setup(struct be_adapter *adapter)
if (status)
goto err;
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
+ be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
+
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST);
+
status = be_cmd_if_create(adapter, cap_flags, en_flags,
&vf_cfg->if_handle, vf + 1);
if (status)
@@ -2661,6 +2747,8 @@ static int be_vf_setup(struct be_adapter *adapter)
if (status)
goto err;
vf_cfg->def_vid = def_vlan;
+
+ be_cmd_enable_vf(adapter, vf + 1);
}
return 0;
err:
@@ -2674,7 +2762,10 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->if_handle = -1;
adapter->be3_native = false;
adapter->promiscuous = false;
- adapter->eq_next_idx = 0;
+ if (be_physfn(adapter))
+ adapter->cmd_privileges = MAX_PRIVILEGES;
+ else
+ adapter->cmd_privileges = MIN_PRIVILEGES;
}
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2712,12 +2803,93 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
return status;
}
+static void be_get_resources(struct be_adapter *adapter)
+{
+ int status;
+ bool profile_present = false;
+
+ if (lancer_chip(adapter)) {
+ status = be_cmd_get_func_config(adapter);
+
+ if (!status)
+ profile_present = true;
+ }
+
+ if (profile_present) {
+ /* Sanity fixes for Lancer */
+ adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
+ BE_UC_PMAC_COUNT);
+ adapter->max_vlans = min_t(u16, adapter->max_vlans,
+ BE_NUM_VLANS_SUPPORTED);
+ adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
+ BE_MAX_MC);
+ adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
+ MAX_TX_QS);
+ adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
+ BE3_MAX_RSS_QS);
+ adapter->max_event_queues = min_t(u16,
+ adapter->max_event_queues,
+ BE3_MAX_RSS_QS);
+
+ if (adapter->max_rss_queues &&
+ adapter->max_rss_queues == adapter->max_rx_queues)
+ adapter->max_rss_queues -= 1;
+
+ if (adapter->max_event_queues < adapter->max_rss_queues)
+ adapter->max_rss_queues = adapter->max_event_queues;
+
+ } else {
+ if (be_physfn(adapter))
+ adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
+ else
+ adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
+
+ if (adapter->function_mode & FLEX10_MODE)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
+ adapter->max_mcast_mac = BE_MAX_MC;
+ adapter->max_tx_queues = MAX_TX_QS;
+ adapter->max_rss_queues = (adapter->be3_native) ?
+ BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ adapter->max_event_queues = BE3_MAX_RSS_QS;
+
+ adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS;
+
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
+ adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
+ }
+}
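Editor's note (hedged): on Lancer the limits above now come from the GET_FUNC_CONFIG resource descriptors rather than compile-time constants; the min_t() calls clamp whatever the firmware reports to the driver's historical BE3 maxima, and one RSS queue is dropped when it would otherwise equal the RX queue count, presumably so a default non-RSS RX queue remains available. All other chips keep the previous hard-coded defaults, now recorded in the adapter rather than scattered through the code.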
+
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
- int pos;
+ int pos, status;
u16 dev_num_vfs;
+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
+ &adapter->function_mode,
+ &adapter->function_caps);
+ if (status)
+ goto err;
+
+ be_get_resources(adapter);
+
+ /* primary mac needs 1 pmac entry */
+ adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!adapter->pmac_id) {
+ status = -ENOMEM;
+ goto err;
+ }
+
pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
if (pos) {
pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
@@ -2726,13 +2898,14 @@ static int be_get_config(struct be_adapter *adapter)
dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
adapter->dev_num_vfs = dev_num_vfs;
}
- return 0;
+err:
+ return status;
}
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 cap_flags, en_flags;
+ u32 en_flags;
u32 tx_fc, rx_fc;
int status;
u8 mac[ETH_ALEN];
@@ -2740,9 +2913,12 @@ static int be_setup(struct be_adapter *adapter)
be_setup_init(adapter);
- be_get_config(adapter);
+ if (!lancer_chip(adapter))
+ be_cmd_req_native_mode(adapter);
- be_cmd_req_native_mode(adapter);
+ status = be_get_config(adapter);
+ if (status)
+ goto err;
be_msix_enable(adapter);
@@ -2762,24 +2938,22 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
+ be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
+ /* In UMC mode the FW does not return the right privileges.
+ * Override with privileges equivalent to the PF's.
+ */
+ if (be_is_mc(adapter))
+ adapter->cmd_privileges = MAX_PRIVILEGES;
+
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
- cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
- cap_flags |= BE_IF_FLAGS_RSS;
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
en_flags |= BE_IF_FLAGS_RSS;
- }
- if (lancer_chip(adapter) && !be_physfn(adapter)) {
- en_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
- cap_flags = en_flags;
- }
+ en_flags = en_flags & adapter->if_cap_flags;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
&adapter->if_handle, 0);
if (status != 0)
goto err;
@@ -2827,8 +3001,8 @@ static int be_setup(struct be_adapter *adapter)
dev_warn(dev, "device doesn't support SRIOV\n");
}
- be_cmd_get_phy_info(adapter);
- if (be_pause_supported(adapter))
+ status = be_cmd_get_phy_info(adapter);
+ if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -2895,7 +3069,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
int i = 0, img_type = 0;
struct flash_section_info_g2 *fsec_g2 = NULL;
- if (adapter->generation != BE_GEN3)
+ if (BE2_chip(adapter))
fsec_g2 = (struct flash_section_info_g2 *)fsec;
for (i = 0; i < MAX_FLASH_COMP; i++) {
@@ -2928,7 +3102,49 @@ struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
return NULL;
}
-static int be_flash_data(struct be_adapter *adapter,
+static int be_flash(struct be_adapter *adapter, const u8 *img,
+ struct be_dma_mem *flash_cmd, int optype, int img_size)
+{
+ u32 total_bytes = 0, flash_op, num_bytes = 0;
+ int status = 0;
+ struct be_cmd_write_flashrom *req = flash_cmd->va;
+
+ total_bytes = img_size;
+ while (total_bytes) {
+ num_bytes = min_t(u32, 32*1024, total_bytes);
+
+ total_bytes -= num_bytes;
+
+ if (!total_bytes) {
+ if (optype == OPTYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_FLASH;
+ else
+ flash_op = FLASHROM_OPER_FLASH;
+ } else {
+ if (optype == OPTYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_SAVE;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ }
+
+ memcpy(req->data_buf, img, num_bytes);
+ img += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
+ flash_op, num_bytes);
+ if (status) {
+ if (status == ILLEGAL_IOCTL_REQ &&
+ optype == OPTYPE_PHY_FW)
+ break;
+ dev_err(&adapter->pdev->dev,
+ "cmd to write to flash rom failed.\n");
+ return status;
+ }
+ }
+ return 0;
+}
+
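Editor's note (worked example, hedged): be_flash() above streams each component to the controller in 32 KB pieces -- every chunk except the last is sent with FLASHROM_OPER_SAVE (or FLASHROM_OPER_PHY_SAVE for PHY firmware) and the final chunk with FLASHROM_OPER_FLASH, which commits the image. A 100 KB (102400-byte) component therefore becomes three 32 KB SAVE writes followed by one 4 KB FLASH write, each copied into req->data_buf; this is also why be_cmd_write_flashrom grew a 32768-byte data_buf in be_cmds.h earlier in this patch.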
+/* For BE2 and BE3 */
+static int be_flash_BEx(struct be_adapter *adapter,
const struct firmware *fw,
struct be_dma_mem *flash_cmd,
int num_of_images)
@@ -2936,12 +3152,9 @@ static int be_flash_data(struct be_adapter *adapter,
{
int status = 0, i, filehdr_size = 0;
int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
- u32 total_bytes = 0, flash_op;
- int num_bytes;
const u8 *p = fw->data;
- struct be_cmd_write_flashrom *req = flash_cmd->va;
const struct flash_comp *pflashcomp;
- int num_comp, hdr_size;
+ int num_comp, redboot;
struct flash_section_info *fsec = NULL;
struct flash_comp gen3_flash_types[] = {
@@ -2986,7 +3199,7 @@ static int be_flash_data(struct be_adapter *adapter,
FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
};
- if (adapter->generation == BE_GEN3) {
+ if (BE3_chip(adapter)) {
pflashcomp = gen3_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g3);
num_comp = ARRAY_SIZE(gen3_flash_types);
@@ -2995,6 +3208,7 @@ static int be_flash_data(struct be_adapter *adapter,
filehdr_size = sizeof(struct flash_file_hdr_g2);
num_comp = ARRAY_SIZE(gen2_flash_types);
}
+
/* Get flash section info*/
fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
if (!fsec) {
@@ -3010,70 +3224,105 @@ static int be_flash_data(struct be_adapter *adapter,
memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
continue;
- if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
- if (!phy_flashing_required(adapter))
+ if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
+ !phy_flashing_required(adapter))
continue;
- }
-
- hdr_size = filehdr_size +
- (num_of_images * sizeof(struct image_hdr));
- if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
- (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
- pflashcomp[i].size, hdr_size)))
- continue;
+ if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
+ redboot = be_flash_redboot(adapter, fw->data,
+ pflashcomp[i].offset, pflashcomp[i].size,
+ filehdr_size + img_hdrs_size);
+ if (!redboot)
+ continue;
+ }
- /* Flash the component */
p = fw->data;
p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
- total_bytes = pflashcomp[i].size;
- while (total_bytes) {
- if (total_bytes > 32*1024)
- num_bytes = 32*1024;
- else
- num_bytes = total_bytes;
- total_bytes -= num_bytes;
- if (!total_bytes) {
- if (pflashcomp[i].optype == OPTYPE_PHY_FW)
- flash_op = FLASHROM_OPER_PHY_FLASH;
- else
- flash_op = FLASHROM_OPER_FLASH;
- } else {
- if (pflashcomp[i].optype == OPTYPE_PHY_FW)
- flash_op = FLASHROM_OPER_PHY_SAVE;
- else
- flash_op = FLASHROM_OPER_SAVE;
- }
- memcpy(req->params.data_buf, p, num_bytes);
- p += num_bytes;
- status = be_cmd_write_flashrom(adapter, flash_cmd,
- pflashcomp[i].optype, flash_op, num_bytes);
- if (status) {
- if ((status == ILLEGAL_IOCTL_REQ) &&
- (pflashcomp[i].optype ==
- OPTYPE_PHY_FW))
- break;
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed.\n");
- return -1;
- }
+
+ status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
+ pflashcomp[i].size);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Flashing section type %d failed.\n",
+ pflashcomp[i].img_type);
+ return status;
}
}
return 0;
}
-static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
+static int be_flash_skyhawk(struct be_adapter *adapter,
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- if (fhdr == NULL)
- return 0;
- if (fhdr->build[0] == '3')
- return BE_GEN3;
- else if (fhdr->build[0] == '2')
- return BE_GEN2;
- else
- return 0;
+ int status = 0, i, filehdr_size = 0;
+ int img_offset, img_size, img_optype, redboot;
+ int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
+ const u8 *p = fw->data;
+ struct flash_section_info *fsec = NULL;
+
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
+ if (!fsec) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid Cookie. UFI corrupted ?\n");
+ return -1;
+ }
+
+ for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
+ img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
+ img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+
+ switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
+ case IMAGE_FIRMWARE_iSCSI:
+ img_optype = OPTYPE_ISCSI_ACTIVE;
+ break;
+ case IMAGE_BOOT_CODE:
+ img_optype = OPTYPE_REDBOOT;
+ break;
+ case IMAGE_OPTION_ROM_ISCSI:
+ img_optype = OPTYPE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_PXE:
+ img_optype = OPTYPE_PXE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_FCoE:
+ img_optype = OPTYPE_FCOE_BIOS;
+ break;
+ case IMAGE_FIRMWARE_BACKUP_iSCSI:
+ img_optype = OPTYPE_ISCSI_BACKUP;
+ break;
+ case IMAGE_NCSI:
+ img_optype = OPTYPE_NCSI_FW;
+ break;
+ default:
+ continue;
+ }
+
+ if (img_optype == OPTYPE_REDBOOT) {
+ redboot = be_flash_redboot(adapter, fw->data,
+ img_offset, img_size,
+ filehdr_size + img_hdrs_size);
+ if (!redboot)
+ continue;
+ }
+
+ p = fw->data;
+ p += filehdr_size + img_offset + img_hdrs_size;
+ if (p + img_size > fw->data + fw->size)
+ return -1;
+
+ status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Flashing section type %d failed.\n",
+ fsec->fsec_entry[i].type);
+ return status;
+ }
+ }
+ return 0;
}
static int lancer_wait_idle(struct be_adapter *adapter)
@@ -3207,6 +3456,28 @@ lancer_fw_exit:
return status;
}
+#define UFI_TYPE2 2
+#define UFI_TYPE3 3
+#define UFI_TYPE4 4
+static int be_get_ufi_type(struct be_adapter *adapter,
+ struct flash_file_hdr_g2 *fhdr)
+{
+ if (fhdr == NULL)
+ goto be_get_ufi_exit;
+
+ if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
+ return UFI_TYPE4;
+ else if (BE3_chip(adapter) && fhdr->build[0] == '3')
+ return UFI_TYPE3;
+ else if (BE2_chip(adapter) && fhdr->build[0] == '2')
+ return UFI_TYPE2;
+
+be_get_ufi_exit:
+ dev_err(&adapter->pdev->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ return -1;
+}
+
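Editor's note: be_get_ufi_type() keys off the first character of the UFI build string and only accepts an image matching the detected chip; on a mismatch it returns -1 and be_fw_download() below reports the incompatibility. The mapping, as a hedged illustrative table (not part of the patch):

	/* build[0] -> target ASIC, per the checks above */
	static const char ufi_build_char[] = {
		[UFI_TYPE2] = '2',	/* BE2 */
		[UFI_TYPE3] = '3',	/* BE3 */
		[UFI_TYPE4] = '4',	/* Skyhawk */
	};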
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
struct flash_file_hdr_g2 *fhdr;
@@ -3214,12 +3485,9 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
struct image_hdr *img_hdr_ptr = NULL;
struct be_dma_mem flash_cmd;
const u8 *p;
- int status = 0, i = 0, num_imgs = 0;
+ int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
- p = fw->data;
- fhdr = (struct flash_file_hdr_g2 *) p;
-
- flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
+ flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
&flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
@@ -3229,27 +3497,32 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
goto be_fw_exit;
}
- if ((adapter->generation == BE_GEN3) &&
- (get_ufigen_type(fhdr) == BE_GEN3)) {
- fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
- num_imgs = le32_to_cpu(fhdr3->num_imgs);
- for (i = 0; i < num_imgs; i++) {
- img_hdr_ptr = (struct image_hdr *) (fw->data +
- (sizeof(struct flash_file_hdr_g3) +
- i * sizeof(struct image_hdr)));
- if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
- status = be_flash_data(adapter, fw, &flash_cmd,
- num_imgs);
+ p = fw->data;
+ fhdr = (struct flash_file_hdr_g2 *)p;
+
+ ufi_type = be_get_ufi_type(adapter, fhdr);
+
+ fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
+ for (i = 0; i < num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *)(fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
+ if (ufi_type == UFI_TYPE4)
+ status = be_flash_skyhawk(adapter, fw,
+ &flash_cmd, num_imgs);
+ else if (ufi_type == UFI_TYPE3)
+ status = be_flash_BEx(adapter, fw, &flash_cmd,
+ num_imgs);
}
- } else if ((adapter->generation == BE_GEN2) &&
- (get_ufigen_type(fhdr) == BE_GEN2)) {
- status = be_flash_data(adapter, fw, &flash_cmd, 0);
- } else {
- dev_err(&adapter->pdev->dev,
- "UFI and Interface are not compatible for flashing\n");
- status = -1;
}
+ if (ufi_type == UFI_TYPE2)
+ status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
+ else if (ufi_type == -1)
+ status = -1;
+
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
flash_cmd.dma);
if (status) {
@@ -3344,80 +3617,47 @@ static void be_netdev_init(struct net_device *netdev)
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
- if (adapter->csr)
- iounmap(adapter->csr);
if (adapter->db)
- iounmap(adapter->db);
- if (adapter->roce_db.base)
- pci_iounmap(adapter->pdev, adapter->roce_db.base);
+ pci_iounmap(adapter->pdev, adapter->db);
}
-static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
+static int db_bar(struct be_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
- u8 __iomem *addr;
-
- addr = pci_iomap(pdev, 2, 0);
- if (addr == NULL)
- return -ENOMEM;
+ if (lancer_chip(adapter) || !be_physfn(adapter))
+ return 0;
+ else
+ return 4;
+}
- adapter->roce_db.base = addr;
- adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
- adapter->roce_db.size = 8192;
- adapter->roce_db.total_size = pci_resource_len(pdev, 2);
+static int be_roce_map_pci_bars(struct be_adapter *adapter)
+{
+ if (skyhawk_chip(adapter)) {
+ adapter->roce_db.size = 4096;
+ adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
+ db_bar(adapter));
+ adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
+ db_bar(adapter));
+ }
return 0;
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- int db_reg;
+ u32 sli_intf;
- if (lancer_chip(adapter)) {
- if (be_type_2_3(adapter)) {
- addr = ioremap_nocache(
- pci_resource_start(adapter->pdev, 0),
- pci_resource_len(adapter->pdev, 0));
- if (addr == NULL)
- return -ENOMEM;
- adapter->db = addr;
- }
- if (adapter->if_type == SLI_INTF_TYPE_3) {
- if (lancer_roce_map_pci_bars(adapter))
- goto pci_map_err;
- }
- return 0;
- }
-
- if (be_physfn(adapter)) {
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
- pci_resource_len(adapter->pdev, 2));
- if (addr == NULL)
- return -ENOMEM;
- adapter->csr = addr;
- }
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+ SLI_INTF_IF_TYPE_SHIFT;
- if (adapter->generation == BE_GEN2) {
- db_reg = 4;
- } else {
- if (be_physfn(adapter))
- db_reg = 4;
- else
- db_reg = 0;
- }
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
- pci_resource_len(adapter->pdev, db_reg));
+ addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
if (addr == NULL)
goto pci_map_err;
adapter->db = addr;
- if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
- adapter->roce_db.size = 4096;
- adapter->roce_db.io_addr =
- pci_resource_start(adapter->pdev, db_reg);
- adapter->roce_db.total_size =
- pci_resource_len(adapter->pdev, db_reg);
- }
+
+ be_roce_map_pci_bars(adapter);
return 0;
+
pci_map_err:
be_unmap_pci_bars(adapter);
return -ENOMEM;
@@ -3437,7 +3677,6 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
if (mem->va)
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
- kfree(adapter->pmac_id);
}
static int be_ctrl_init(struct be_adapter *adapter)
@@ -3445,8 +3684,14 @@ static int be_ctrl_init(struct be_adapter *adapter)
struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
struct be_dma_mem *rx_filter = &adapter->rx_filter;
+ u32 sli_intf;
int status;
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
+ SLI_INTF_FAMILY_SHIFT;
+ adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+
status = be_map_pci_bars(adapter);
if (status)
goto done;
@@ -3473,13 +3718,6 @@ static int be_ctrl_init(struct be_adapter *adapter)
goto free_mbox;
}
memset(rx_filter->va, 0, rx_filter->size);
-
- /* primary mac needs 1 pmac entry */
- adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
- sizeof(*adapter->pmac_id), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@@ -3512,14 +3750,14 @@ static int be_stats_init(struct be_adapter *adapter)
{
struct be_dma_mem *cmd = &adapter->stats_cmd;
- if (adapter->generation == BE_GEN2) {
+ if (lancer_chip(adapter))
+ cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+ else if (BE2_chip(adapter))
cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
- } else {
- if (lancer_chip(adapter))
- cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
- else
- cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
- }
+ else
+ /* BE3 and Skyhawk */
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
GFP_KERNEL);
if (cmd->va == NULL)
@@ -3573,6 +3811,9 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
u32 level = 0;
int j;
+ if (lancer_chip(adapter))
+ return 0;
+
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
@@ -3598,26 +3839,12 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
err:
return level;
}
+
static int be_get_initial_config(struct be_adapter *adapter)
{
int status;
u32 level;
- status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
- &adapter->function_mode, &adapter->function_caps);
- if (status)
- return status;
-
- if (adapter->function_mode & FLEX10_MODE)
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
-
- if (be_physfn(adapter))
- adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
- else
- adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
-
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
@@ -3642,55 +3869,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
return 0;
}
-static int be_dev_type_check(struct be_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- u32 sli_intf = 0, if_type;
-
- switch (pdev->device) {
- case BE_DEVICE_ID1:
- case OC_DEVICE_ID1:
- adapter->generation = BE_GEN2;
- break;
- case BE_DEVICE_ID2:
- case OC_DEVICE_ID2:
- adapter->generation = BE_GEN3;
- break;
- case OC_DEVICE_ID3:
- case OC_DEVICE_ID4:
- pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
- SLI_INTF_IF_TYPE_SHIFT;
- if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
- SLI_INTF_IF_TYPE_SHIFT;
- if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
- !be_type_2_3(adapter)) {
- dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
- return -EINVAL;
- }
- adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
- SLI_INTF_FAMILY_SHIFT);
- adapter->generation = BE_GEN3;
- break;
- case OC_DEVICE_ID5:
- pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
- dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
- return -EINVAL;
- }
- adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
- SLI_INTF_FAMILY_SHIFT);
- adapter->generation = BE_GEN3;
- break;
- default:
- adapter->generation = 0;
- }
-
- pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
- return 0;
-}
-
static int lancer_recover_func(struct be_adapter *adapter)
{
int status;
@@ -3721,8 +3899,9 @@ static int lancer_recover_func(struct be_adapter *adapter)
"Adapter SLIPORT recovery succeeded\n");
return 0;
err:
- dev_err(&adapter->pdev->dev,
- "Adapter SLIPORT recovery failed\n");
+ if (adapter->eeh_error)
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery failed\n");
return status;
}
@@ -3845,11 +4024,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
adapter = netdev_priv(netdev);
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
-
- status = be_dev_type_check(adapter);
- if (status)
- goto free_netdev;
-
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -4023,9 +4197,6 @@ static void be_shutdown(struct pci_dev *pdev)
netif_device_detach(adapter->netdev);
- if (adapter->wol)
- be_setup_wol(adapter, true);
-
be_cmd_reset_function(adapter);
pci_disable_device(pdev);
@@ -4061,9 +4232,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
/* The error could cause the FW to trigger a flash debug dump.
* Resetting the card while flash dump is in progress
- * can cause it not to recover; wait for it to finish
+ * can cause it not to recover; wait for it to finish.
+ * Wait only for the first function, as it is needed only once per
+ * adapter.
*/
- ssleep(30);
+ if (pdev->devfn == 0)
+ ssleep(30);
+
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index deecc44..55d32aa 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -47,10 +47,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
dev_info.dpp_unmapped_len = 0;
}
dev_info.pdev = adapter->pdev;
- if (adapter->sli_family == SKYHAWK_SLI_FAMILY)
- dev_info.db = adapter->db;
- else
- dev_info.db = adapter->roce_db.base;
+ dev_info.db = adapter->db;
dev_info.unmapped_db = adapter->roce_db.io_addr;
dev_info.db_page_size = adapter->roce_db.size;
dev_info.db_total_size = adapter->roce_db.total_size;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index feff516..5ba6e1c 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -92,4 +92,13 @@ config GIANFAR
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540.
+config FEC_PTP
+ bool "PTP Hardware Clock (PHC)"
+ depends on FEC && ARCH_MXC
+ select PTP_1588_CLOCK
+ default y if SOC_IMX6Q
+ ---help---
+ Say Y here if you want to use PTP Hardware Clock (PHC) in the
+ driver. Only the basic clock operations have been implemented.
+
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3d1839a..d4d19b3 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_FEC_PTP) += fec_ptp.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fffd205..2665162 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -140,21 +140,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-/* The number of Tx and Rx buffers. These are allocated from the page
- * pool. The code may assume these are power of two, so it it best
- * to keep them that size.
- * We don't need to allocate pages for the transmitter. We just use
- * the skbuffer directly.
- */
-#define FEC_ENET_RX_PAGES 8
-#define FEC_ENET_RX_FRSIZE 2048
-#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
-#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
-#define FEC_ENET_TX_FRSIZE 2048
-#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE 16 /* Must be power of two */
-#define TX_RING_MOD_MASK 15 /* for this to work */
-
#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
@@ -179,9 +164,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE 64
#define PKT_MAXBLR_SIZE 1520
-/* This device has up to three irqs on some platforms */
-#define FEC_IRQ_NUM 3
-
/*
* The 5270/5271/5280/5282/532x RX control register also contains maximum frame
* size bits. Other FEC hardware does not, so we need to take that into
@@ -194,61 +176,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define OPT_FRAME_SIZE 0
#endif
-/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
- * tx_bd_base always point to the base of the buffer descriptors. The
- * cur_rx and cur_tx point to the currently available buffer.
- * The dirty_tx tracks the current buffer that is being sent by the
- * controller. The cur_tx and dirty_tx are equal under both completely
- * empty and completely full conditions. The empty/ready indicator in
- * the buffer descriptor determines the actual condition.
- */
-struct fec_enet_private {
- /* Hardware registers of the FEC device */
- void __iomem *hwp;
-
- struct net_device *netdev;
-
- struct clk *clk_ipg;
- struct clk *clk_ahb;
-
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
- unsigned char *tx_bounce[TX_RING_SIZE];
- struct sk_buff* tx_skbuff[TX_RING_SIZE];
- struct sk_buff* rx_skbuff[RX_RING_SIZE];
- ushort skb_cur;
- ushort skb_dirty;
-
- /* CPM dual port RAM relative addresses */
- dma_addr_t bd_dma;
- /* Address of Rx and Tx buffers */
- struct bufdesc *rx_bd_base;
- struct bufdesc *tx_bd_base;
- /* The next free ring entry */
- struct bufdesc *cur_rx, *cur_tx;
- /* The ring entries to be free()ed */
- struct bufdesc *dirty_tx;
-
- uint tx_full;
- /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
- spinlock_t hw_lock;
-
- struct platform_device *pdev;
-
- int opened;
- int dev_id;
-
- /* Phylib and MDIO interface */
- struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
- int mii_timeout;
- uint phy_speed;
- phy_interface_t phy_interface;
- int link;
- int full_duplex;
- struct completion mdio_done;
- int irq[FEC_IRQ_NUM];
-};
-
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
#define FEC_MMFR_OP_READ (2 << 28)
@@ -353,6 +280,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
+#ifdef CONFIG_FEC_PTP
+ bdp->cbd_bdu = 0;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ fep->hwts_tx_en)) {
+ bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else {
+
+ bdp->cbd_esc = BD_ENET_TX_INT;
+ }
+#endif
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -510,10 +448,17 @@ fec_restart(struct net_device *ndev, int duplex)
writel(1 << 8, fep->hwp + FEC_X_WMRK);
}
+#ifdef CONFIG_FEC_PTP
+ ecntl |= (1 << 4);
+#endif
+
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+#ifdef CONFIG_FEC_PTP
+ fec_ptp_start_cyclecounter(ndev);
+#endif
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
@@ -599,6 +544,19 @@ fec_enet_tx(struct net_device *ndev)
ndev->stats.tx_packets++;
}
+#ifdef CONFIG_FEC_PTP
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ unsigned long flags;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ shhwtstamps.hwtstamp = ns_to_ktime(
+ timecounter_cyc2time(&fep->tc, bdp->ts));
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+#endif
if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
@@ -725,6 +683,21 @@ fec_enet_rx(struct net_device *ndev)
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
skb->protocol = eth_type_trans(skb, ndev);
+#ifdef CONFIG_FEC_PTP
+ /* Get receive timestamp from the skb */
+ if (fep->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps =
+ skb_hwtstamps(skb);
+ unsigned long flags;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ shhwtstamps->hwtstamp = ns_to_ktime(
+ timecounter_cyc2time(&fep->tc, bdp->ts));
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+#endif
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
}
@@ -739,6 +712,12 @@ rx_processing_done:
status |= BD_ENET_RX_EMPTY;
bdp->cbd_sc = status;
+#ifdef CONFIG_FEC_PTP
+ bdp->cbd_esc = BD_ENET_RX_INT;
+ bdp->cbd_prot = 0;
+ bdp->cbd_bdu = 0;
+#endif
+
/* Update BD pointer to next entry */
if (status & BD_ENET_RX_WRAP)
bdp = fep->rx_bd_base;
@@ -1178,6 +1157,10 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
+#ifdef CONFIG_FEC_PTP
+ if (cmd == SIOCSHWTSTAMP)
+ return fec_ptp_ioctl(ndev, rq, cmd);
+#endif
return phy_mii_ioctl(phydev, rq, cmd);
}
@@ -1224,6 +1207,9 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
+#ifdef CONFIG_FEC_PTP
+ bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
bdp++;
}
@@ -1237,6 +1223,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
+
+#ifdef CONFIG_FEC_PTP
+ bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
bdp++;
}
@@ -1638,9 +1628,19 @@ fec_probe(struct platform_device *pdev)
goto failed_clk;
}
+#ifdef CONFIG_FEC_PTP
+ fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+ if (IS_ERR(fep->clk_ptp)) {
+ ret = PTR_ERR(fep->clk_ptp);
+ goto failed_clk;
+ }
+#endif
+
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
-
+#ifdef CONFIG_FEC_PTP
+ clk_prepare_enable(fep->clk_ptp);
+#endif
reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(reg_phy)) {
ret = regulator_enable(reg_phy);
@@ -1668,6 +1668,10 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
+#ifdef CONFIG_FEC_PTP
+ fec_ptp_init(ndev, pdev);
+#endif
+
return 0;
failed_register:
@@ -1677,6 +1681,9 @@ failed_init:
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
+#ifdef CONFIG_FEC_PTP
+ clk_disable_unprepare(fep->clk_ptp);
+#endif
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1709,6 +1716,12 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
+#ifdef CONFIG_FEC_PTP
+ del_timer_sync(&fep->time_keep);
+ clk_disable_unprepare(fep->clk_ptp);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+#endif
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8408c62..c5a3bc1 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -13,6 +13,12 @@
#define FEC_H
/****************************************************************************/
+#ifdef CONFIG_FEC_PTP
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif
+
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
@@ -88,6 +94,13 @@ struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
+#ifdef CONFIG_FEC_PTP
+ unsigned long cbd_esc;
+ unsigned long cbd_prot;
+ unsigned long cbd_bdu;
+ unsigned long ts;
+ unsigned short res0[4];
+#endif
};
#else
struct bufdesc {
@@ -147,6 +160,112 @@ struct bufdesc {
#define BD_ENET_TX_CSL ((ushort)0x0001)
#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
+/* Enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT 0x40000000
+#define BD_ENET_TX_TS 0x20000000
+
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM 3
+
+/* The number of Tx and Rx buffers. These are allocated from the page
+ * pool. The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter. We just use
+ * the skbuffer directly.
+ */
+
+#define FEC_ENET_RX_PAGES 8
+#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE 2048
+#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE 16 /* Must be power of two */
+#define TX_RING_MOD_MASK 15 /* for this to work */
+
+#define BD_ENET_RX_INT 0x00800000
+#define BD_ENET_RX_PTP ((ushort)0x0400)
+
+/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+ /* Hardware registers of the FEC device */
+ void __iomem *hwp;
+
+ struct net_device *netdev;
+
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+#ifdef CONFIG_FEC_PTP
+ struct clk *clk_ptp;
+#endif
+
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ ushort skb_cur;
+ ushort skb_dirty;
+
+ /* CPM dual port RAM relative addresses */
+ dma_addr_t bd_dma;
+ /* Address of Rx and Tx buffers */
+ struct bufdesc *rx_bd_base;
+ struct bufdesc *tx_bd_base;
+ /* The next free ring entry */
+ struct bufdesc *cur_rx, *cur_tx;
+ /* The ring entries to be free()ed */
+ struct bufdesc *dirty_tx;
+
+ uint tx_full;
+ /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
+ spinlock_t hw_lock;
+
+ struct platform_device *pdev;
+
+ int opened;
+ int dev_id;
+
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
+ phy_interface_t phy_interface;
+ int link;
+ int full_duplex;
+ struct completion mdio_done;
+ int irq[FEC_IRQ_NUM];
+
+#ifdef CONFIG_FEC_PTP
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ unsigned long last_overflow_check;
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ int rx_hwtstamp_filter;
+ u32 base_incval;
+ u32 cycle_speed;
+ int hwts_rx_en;
+ int hwts_tx_en;
+ struct timer_list time_keep;
+#endif
+
+};
+
+#ifdef CONFIG_FEC_PTP
+void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void fec_ptp_start_cyclecounter(struct net_device *ndev);
+int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+#endif
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
new file mode 100644
index 0000000..c40526c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -0,0 +1,383 @@
+/*
+ * Fast Ethernet Controller (ENET) PTP driver for MX6x.
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "fec.h"
+
+/* FEC 1588 register bits */
+#define FEC_T_CTRL_SLAVE 0x00002000
+#define FEC_T_CTRL_CAPTURE 0x00000800
+#define FEC_T_CTRL_RESTART 0x00000200
+#define FEC_T_CTRL_PERIOD_RST 0x00000030
+#define FEC_T_CTRL_PERIOD_EN 0x00000010
+#define FEC_T_CTRL_ENABLE 0x00000001
+
+#define FEC_T_INC_MASK 0x0000007f
+#define FEC_T_INC_OFFSET 0
+#define FEC_T_INC_CORR_MASK 0x00007f00
+#define FEC_T_INC_CORR_OFFSET 8
+
+#define FEC_ATIME_CTRL 0x400
+#define FEC_ATIME 0x404
+#define FEC_ATIME_EVT_OFFSET 0x408
+#define FEC_ATIME_EVT_PERIOD 0x40c
+#define FEC_ATIME_CORR 0x410
+#define FEC_ATIME_INC 0x414
+#define FEC_TS_TIMESTAMP 0x418
+
+#define FEC_CC_MULT (1 << 31)
+/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * This function reads the hardware cycle counter register; the cyclecounter
+ * structure uses it to construct a nanosecond time counter from the
+ * arbitrary fixed-point hardware registers.
+ */
+static cycle_t fec_ptp_read(const struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
+ * fec_ptp_start_cyclecounter - create the cycle counter from hw
+ * @ndev: network device
+ *
+ * This function initializes the timecounter and cyclecounter
+ * structures used to generate a ns counter from the arbitrary
+ * fixed-point cycle registers in the hardware.
+ */
+void fec_ptp_start_cyclecounter(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned long flags;
+ int inc;
+
+ inc = 1000000000 / clk_get_rate(fep->clk_ptp);
+
+ /* grab the ptp lock */
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* 1ns counter */
+ writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
+
+ /* use free running count */
+ writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD);
+
+ writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL);
+
+ memset(&fep->cc, 0, sizeof(fep->cc));
+ fep->cc.read = fec_ptp_read;
+ fep->cc.mask = CLOCKSOURCE_MASK(32);
+ fep->cc.shift = 31;
+ fep->cc.mult = FEC_CC_MULT;
+
+ /* reset the ns time counter */
+ timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
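+
+/*
+ * Note on the mult/shift pair above: since inc = 10^9 / clk_ptp is written
+ * to FEC_ATIME_INC, the hardware counter already ticks in nanoseconds, so
+ * the generic timecounter conversion reduces to an identity:
+ *
+ *	ns = (cycles * (1 << 31)) >> 31 = cycles
+ *
+ * Cycle deltas are masked to 32 bits (CLOCKSOURCE_MASK(32)), so the 64-bit
+ * product cannot overflow before the shift.
+ */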
+
+/**
+ * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ *
+ * Because frequency adjustment in the ENET hardware is complex,
+ * the adjustment is done in software by scaling the cyclecounter mult.
+ */
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 diff;
+ unsigned long flags;
+ int neg_adj = 0;
+ u32 mult = FEC_CC_MULT;
+
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ neg_adj = 1;
+ }
+
+ diff = mult;
+ diff *= ppb;
+ diff = div_u64(diff, 1000000000ULL);
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ /*
+ * Dummy read to update cycle_last in the timecounter to now,
+ * so that the adjusted mult takes effect from the next
+ * timecounter_read() onwards.
+ */
+ timecounter_read(&fep->tc);
+
+ fep->cc.mult = neg_adj ? mult - diff : mult + diff;
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
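+
+/*
+ * Worked example for the scaling above (illustrative numbers): with
+ * ppb = 100 and mult = 2^31, diff = 2147483648 * 100 / 10^9 = 214
+ * (truncated), so cc.mult becomes 2^31 +/- 214 and subsequent
+ * timecounter reads run roughly 100 ppb faster or slower.
+ */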
+
+/**
+ * fec_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ unsigned long flags;
+ u64 now;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ now = timecounter_read(&fep->tc);
+ now += delta;
+
+ /* reset the timecounter */
+ timecounter_init(&fep->tc, &fep->cc, now);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec.
+ */
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct fec_enet_private *adapter =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * fec_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall clock time.
+ */
+static int fec_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+
+ u64 ns;
+ unsigned long flags;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ timecounter_init(&fep->tc, &fep->cc, ns);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return 0;
+}
+
+/**
+ * fec_ptp_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ */
+static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * fec_ptp_ioctl - control hardware time stamping
+ * @ndev: pointer to net_device
+ * @ifr: ioctl data
+ * @cmd: particular ioctl requested
+ */
+int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ fep->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ fep->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ if (fep->hwts_rx_en)
+ fep->hwts_rx_en = 0;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ default:
+ /*
+ * The FEC cannot filter on packet type, so any filter request
+ * other than NONE is handled by timestamping all received
+ * packets and reporting HWTSTAMP_FILTER_ALL back to the caller.
+ */
+ fep->hwts_rx_en = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * fec_time_keep - call timecounter_read() every second to avoid counter overrun
+ * because the ENET free-running counter is only 32 bits wide and wraps in about 4s
+ */
+static void fec_time_keep(unsigned long _data)
+{
+ struct fec_enet_private *fep = (struct fec_enet_private *)_data;
+ u64 ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ mod_timer(&fep->time_keep, jiffies + HZ);
+}
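+
+/*
+ * Why once per second: the free-running counter is 32 bits wide and counts
+ * nanoseconds, so it wraps after 2^32 ns = 4294967296 ns, roughly every
+ * 4.3 seconds.  Reading the timecounter at HZ intervals keeps each observed
+ * delta well inside the 32-bit CLOCKSOURCE_MASK, so no wrap is missed.
+ */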
+
+/**
+ * fec_ptp_init
+ * @ndev: The FEC network adapter
+ * @pdev: The FEC platform device
+ *
+ * This function performs the required steps for enabling PTP support:
+ * it sets up the clock capabilities, starts the cycle counter and the
+ * overflow-check timer, and registers the PHC device.
+ */
+
+void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ fep->ptp_caps.owner = THIS_MODULE;
+ snprintf(fep->ptp_caps.name, 16, "fec ptp");
+
+ fep->ptp_caps.max_adj = 250000000;
+ fep->ptp_caps.n_alarm = 0;
+ fep->ptp_caps.n_ext_ts = 0;
+ fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.pps = 0;
+ fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjtime = fec_ptp_adjtime;
+ fep->ptp_caps.gettime = fec_ptp_gettime;
+ fep->ptp_caps.settime = fec_ptp_settime;
+ fep->ptp_caps.enable = fec_ptp_enable;
+
+ spin_lock_init(&fep->tmreg_lock);
+
+ fec_ptp_start_cyclecounter(ndev);
+
+ init_timer(&fep->time_keep);
+ fep->time_keep.data = (unsigned long)fep;
+ fep->time_keep.function = fec_time_keep;
+ fep->time_keep.expires = jiffies + HZ;
+ add_timer(&fep->time_keep);
+
+ fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
+ if (IS_ERR(fep->ptp_clock)) {
+ fep->ptp_clock = NULL;
+ pr_err("ptp_clock_register failed\n");
+ } else {
+ pr_info("registered PHC device on %s\n", ndev->name);
+ }
+}
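Once ptp_clock_register() succeeds, the clock is exposed as a standard PHC character device (/dev/ptpN, index depending on registration order) and can be driven by generic PTP tooling. The sketch below is a userspace illustration only (not part of the patch; the socket and interface name are assumed to be supplied by the caller); it shows how an application arms the hwtstamp path implemented by fec_ptp_ioctl() above through the standard SIOCSHWTSTAMP request.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	/* Illustrative helper (not from the patch): enable HW timestamping. */
	static int enable_hw_timestamping(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;		/* driver sets hwts_tx_en */
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* FEC timestamps all Rx  */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		/* On return the driver has copied back the filter it applied. */
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}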
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 19ac096..bffb2ed 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -210,7 +210,7 @@ static int gfar_init_bds(struct net_device *ndev)
skb = gfar_new_skb(ndev);
if (!skb) {
netdev_err(ndev, "Can't allocate RX buffers\n");
- goto err_rxalloc_fail;
+ return -ENOMEM;
}
rx_queue->rx_skbuff[j] = skb;
@@ -223,10 +223,6 @@ static int gfar_init_bds(struct net_device *ndev)
}
return 0;
-
-err_rxalloc_fail:
- free_skb_resources(priv);
- return -ENOMEM;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
@@ -1359,7 +1355,11 @@ static int gfar_restore(struct device *dev)
return 0;
}
- gfar_init_bds(ndev);
+ if (gfar_init_bds(ndev)) {
+ free_skb_resources(priv);
+ return -ENOMEM;
+ }
+
init_registers(ndev);
gfar_set_mac_address(ndev);
gfar_init_mac(ndev);
@@ -1712,6 +1712,7 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
tx_queue->tx_skbuff[i] = NULL;
}
kfree(tx_queue->tx_skbuff);
+ tx_queue->tx_skbuff = NULL;
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
@@ -1735,6 +1736,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
rxbdp++;
}
kfree(rx_queue->rx_skbuff);
+ rx_queue->rx_skbuff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 479e43e..84c6b6c 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -738,13 +738,11 @@ static int __devexit mal_remove(struct platform_device *ofdev)
/* Synchronize with scheduled polling */
napi_disable(&mal->napi);
- if (!list_empty(&mal->list)) {
+ if (!list_empty(&mal->list))
/* This is *very* bad */
- printk(KERN_EMERG
+ WARN(1, KERN_EMERG
"mal%d: commac list is not empty on remove!\n",
mal->index);
- WARN_ON(1);
- }
dev_set_drvdata(&ofdev->dev, NULL);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0cafe4f..73d28d5 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -93,6 +93,7 @@ config E1000E
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
+ select PTP_1588_CLOCK
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -120,19 +121,6 @@ config IGB_DCA
driver. DCA is a method for warming the CPU cache before data
is used, with the intent of lessening the impact of cache misses.
-config IGB_PTP
- bool "PTP Hardware Clock (PHC)"
- default n
- depends on IGB && EXPERIMENTAL
- select PPS
- select PTP_1588_CLOCK
- ---help---
- Say Y here if you want to use PTP Hardware Clock (PHC) in the
- driver. Only the basic clock operations have been implemented.
-
- Every timestamp and clock read operations must consult the
- overflow counter to form a correct time value.
-
config IGBVF
tristate "Intel(R) 82576 Virtual Function Ethernet support"
depends on PCI
@@ -180,6 +168,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI && INET
select MDIO
+ select PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
@@ -222,19 +211,6 @@ config IXGBE_DCB
If unsure, say N.
-config IXGBE_PTP
- bool "PTP Clock Support"
- default n
- depends on IXGBE && EXPERIMENTAL
- select PPS
- select PTP_1588_CLOCK
- ---help---
- Say Y here if you want support for 1588 Timestamping with a
- PHC device, using the PTP 1588 Clock support. This is
- required to enable timestamping support for the device.
-
- If unsure, say N.
-
config IXGBEVF
tristate "Intel(R) 82599 Virtual Function Ethernet support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 3d68395..8fedd24 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -107,6 +107,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
};
static DEFINE_SPINLOCK(e1000_eeprom_lock);
+static DEFINE_SPINLOCK(e1000_phy_lock);
/**
* e1000_set_phy_type - Set the phy type member in the hw struct.
@@ -2830,19 +2831,25 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
u32 ret_val;
+ unsigned long flags;
e_dbg("e1000_read_phy_reg");
+ spin_lock_irqsave(&e1000_phy_lock, flags);
+
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(u16) reg_addr);
- if (ret_val)
+ if (ret_val) {
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
+ }
}
ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
phy_data);
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
}
@@ -2965,19 +2972,25 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
u32 ret_val;
+ unsigned long flags;
e_dbg("e1000_write_phy_reg");
+ spin_lock_irqsave(&e1000_phy_lock, flags);
+
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(u16) reg_addr);
- if (ret_val)
+ if (ret_val) {
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
+ }
}
ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
phy_data);
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f444eb0..dadb13b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5067,6 +5067,17 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ /*
+ * The minimum packet size with TCTL.PSP set is 17 bytes so
+ * pad skb in order to meet this minimum size requirement
+ */
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
+ }
+
mss = skb_shinfo(skb)->gso_size;
if (mss) {
u8 hdr_len;
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 97c197f..624476c 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -34,6 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o
-
-igb-$(CONFIG_IGB_PTP) += igb_ptp.o
+ e1000_i210.o igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ca4641e..8c12dbd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -319,6 +319,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.read = igb_read_nvm_srrd_i210;
+ nvm->ops.write = igb_write_nvm_srwr_i210;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
break;
case e1000_i211:
@@ -1277,12 +1278,20 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
+ u32 phpm_reg;
ctrl = rd32(E1000_CTRL);
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
wr32(E1000_CTRL, ctrl);
+ /* Clear Go Link Disconnect bit */
+ if (hw->mac.type >= e1000_82580) {
+ phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
ret_val = igb_setup_serdes_link_82575(hw);
if (ret_val)
goto out;
@@ -2233,19 +2242,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
/* enable or disable per user setting */
if (!(hw->dev_spec._82575.eee_disable)) {
- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
- E1000_IPCNFG_EEE_100M_AN);
- eeer |= (E1000_EEER_TX_LPI_EN |
- E1000_EEER_RX_LPI_EN |
+ u32 eee_su = rd32(E1000_EEE_SU);
+
+ ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
- /* keep the LPI clock running before EEE is enabled */
- if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
- u32 eee_su;
- eee_su = rd32(E1000_EEE_SU);
- eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
- wr32(E1000_EEE_SU, eee_su);
- }
+ /* This bit should not be set in normal operation. */
+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+ hw_dbg("LPI Clock Stop Bit should not be set!\n");
+
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e85c453..44b76b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index de4b41e..198d148 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -636,6 +636,7 @@
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
+#define NVM_VERSION 0x0005
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_CONTROL3_PORT_A 0x0024
@@ -653,6 +654,19 @@
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
+/* NVM version defines */
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_BUILD_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
@@ -860,6 +874,7 @@
#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 77a5f93..4147429 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -423,6 +423,100 @@ s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
}
/**
+ * igb_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver) {
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in the last location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
* igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5dc2bd3..974d235 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -43,6 +43,8 @@ extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
+extern s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -65,6 +67,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
(ID_LED_OFF1_OFF2 << 4) | \
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 819c145..7acddfe 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1391,6 +1391,10 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ /* All MDI settings are supported on 82580 and newer. */
+ if (hw->mac.type >= e1000_82580)
+ goto out;
+
if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
hw_dbg("Invalid MDI setting detected\n");
hw->phy.mdix = 1;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index cbddc4e..e2b2c4b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -33,6 +33,7 @@
#include "e1000_phy.h"
#include "e1000_nvm.h"
#include "e1000_defines.h"
+#include "e1000_i210.h"
/*
* Functions that should not be called directly from drivers but can be used
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index aa5fcdf..7db3f80 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -710,3 +710,74 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
out:
return ret_val;
}
+
+/**
+ * igb_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output structure
+ *
+ * Unsupported MAC types return an all-zero version structure.
+ **/
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+ u16 fw_version;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ igb_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ break;
+ default:
+ return;
+ }
+ /* basic eeprom version numbers */
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
+
+ /* etrack id */
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i350:
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option Rom version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ ((comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT));
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
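The etrack id above is simply the two adjacent NVM words glued together, etrack_id = (eeprom_verh << 16) | eeprom_verl; with illustrative values verh = 0x8000 and verl = 0x1234 it becomes 0x80001234. Together with eep_major/eep_minor (and the Option ROM fields when present) this is what igb_set_fw_version() formats into the firmware-version string reported by ethtool -i.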
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 825b022..7012d45 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -40,4 +40,20 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3404bc7..fe76004 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1207,20 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- goto out;
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /*
+ * Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ goto out;
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- goto out;
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ goto out;
- hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ }
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
@@ -1710,6 +1715,26 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
switch (hw->phy.id) {
case I210_I_PHY_ID:
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6ac3299..ed282f8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -124,6 +124,7 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
/* Enable flexible speed on link-up */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8aad230..d8fd5b6 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,11 +34,9 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
-#ifdef CONFIG_IGB_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IGB_PTP */
#include <linux/bitops.h>
#include <linux/if_vlan.h>
@@ -132,9 +130,10 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256 256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_2048 2048
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE 16
@@ -151,11 +150,18 @@ struct vf_data_storage {
#define IGB_MNG_VLAN_NONE -1
-#define IGB_TX_FLAGS_CSUM 0x00000001
-#define IGB_TX_FLAGS_VLAN 0x00000002
-#define IGB_TX_FLAGS_TSO 0x00000004
-#define IGB_TX_FLAGS_IPV4 0x00000008
-#define IGB_TX_FLAGS_TSTAMP 0x00000010
+enum igb_tx_flags {
+ /* cmd_type flags */
+ IGB_TX_FLAGS_VLAN = 0x01,
+ IGB_TX_FLAGS_TSO = 0x02,
+ IGB_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IGB_TX_FLAGS_IPV4 = 0x10,
+ IGB_TX_FLAGS_CSUM = 0x20,
+};
+
+/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
@@ -174,11 +180,9 @@ struct igb_tx_buffer {
};
struct igb_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
- u32 page_offset;
+ unsigned int page_offset;
};
struct igb_tx_queue_stats {
@@ -205,22 +209,6 @@ struct igb_ring_container {
u8 itr; /* current ITR setting for ring */
};
-struct igb_q_vector {
- struct igb_adapter *adapter; /* backlink */
- int cpu; /* CPU for DCA */
- u32 eims_value; /* EIMS mask value */
-
- struct igb_ring_container rx, tx;
-
- struct napi_struct napi;
-
- u16 itr_val;
- u8 set_itr;
- void __iomem *itr_register;
-
- char name[IFNAMSIZ + 9];
-};
-
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
@@ -232,15 +220,17 @@ struct igb_ring {
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
- u32 size; /* length of desc. ring in bytes */
/* everything past this point are written often */
- u16 next_to_clean ____cacheline_aligned_in_smp;
+ u16 next_to_clean;
u16 next_to_use;
+ u16 next_to_alloc;
union {
/* TX */
@@ -251,12 +241,30 @@ struct igb_ring {
};
/* RX */
struct {
+ struct sk_buff *skb;
struct igb_rx_queue_stats rx_stats;
struct u64_stats_sync rx_syncp;
};
};
- /* Items past this point are only used during ring alloc / free */
- dma_addr_t dma; /* phys address of the ring */
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+ struct igb_adapter *adapter; /* backlink */
+ int cpu; /* CPU for DCA */
+ u32 eims_value; /* EIMS mask value */
+
+ u16 itr_val;
+ u8 set_itr;
+ void __iomem *itr_register;
+
+ struct igb_ring_container rx, tx;
+
+ struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};
enum e1000_ring_flags_t {
@@ -373,7 +381,6 @@ struct igb_adapter {
u32 wvbr;
u32 *shadow_vfta;
-#ifdef CONFIG_IGB_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct delayed_work ptp_overflow_work;
@@ -382,17 +389,18 @@ struct igb_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
-#endif /* CONFIG_IGB_PTP */
char fw_version[32];
};
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_QUAD_PORT_A (1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
+#define IGB_FLAG_DMAC (1 << 4)
+#define IGB_FLAG_PTP (1 << 5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -436,18 +444,27 @@ extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
extern void igb_set_fw_version(struct igb_adapter *);
-#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+ igb_ptp_rx_rgtstamp(q_vector, skb);
+}
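+
+/*
+ * The inline wrapper above only handles the register-latched case:
+ * E1000_RXDADV_STAT_TS means the timestamp was captured in the Rx timestamp
+ * registers (read by igb_ptp_rx_rgtstamp()), while E1000_RXDADV_STAT_TSIP
+ * means the timestamp was written into the packet buffer itself and is
+ * handled separately via igb_ptp_rx_pktstamp().
+ */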
+
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
-#endif /* CONFIG_IGB_PTP */
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 2ea0128..0acf590 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -37,6 +37,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/highmem.h>
#include "igb.h"
@@ -1685,16 +1686,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size + 12], 0xAF, 1);
}
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+ unsigned int frame_size)
{
- frame_size /= 2;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size + 10) == 0xBE) &&
- (*(skb->data + frame_size + 12) == 0xAF)) {
- return 0;
- }
- }
- return 13;
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+ data = kmap(rx_buffer->page);
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+ kunmap(rx_buffer->page);
+
+ return match;
}
static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1713,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info;
- struct netdev_queue *txq;
u16 rx_ntc, tx_ntc, count = 0;
- unsigned int total_bytes = 0, total_packets = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1724,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* check rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
- /* unmap rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(rx_ring->dev,
- rx_buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
/* verify contents of skb */
- if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+ if (igb_check_lbtest_frame(rx_buffer_info, size))
count++;
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- total_bytes += tx_buffer_info->bytecount;
- total_packets += tx_buffer_info->gso_segs;
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
@@ -1746,8 +1756,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
- txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
- netdev_tx_completed_queue(txq, total_packets, total_bytes);
+ netdev_tx_reset_queue(txring_txq(tx_ring));
/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);
@@ -2301,7 +2310,6 @@ static int igb_get_ts_info(struct net_device *dev,
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IGB_PTP
case e1000_82576:
case e1000_82580:
case e1000_i350:
@@ -2337,12 +2345,288 @@ static int igb_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
-#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
}
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igb */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igb_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGB_FLAG_RSS_FIELD_IPV6_UDP)
+static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(E1000_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ dev_err(&adapter->pdev->dev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+ E1000_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+ wr32(E1000_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igb_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ipcnfg, eeer;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ ipcnfg = rd32(E1000_IPCNFG);
+ eeer = rd32(E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
+ edata->advertised = ADVERTISED_1000baseT_Full;
+
+ if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
+ edata->advertised |= ADVERTISED_100baseT_Full;
+
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+
+ /* Report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+ edata->advertised &= ~edata->advertised;
+ }
+
+ return 0;
+}
+
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI timer is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.advertised != edata->advertised) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE Advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ } else if (!edata->eee_enabled) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE options are not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+ igb_set_eee_i350(hw);
+
+ /* reset link */
+ if (!netif_running(netdev))
+ igb_reset(adapter);
+ }
+
+ return 0;
+}
+
static int igb_ethtool_begin(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2383,6 +2667,10 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
.get_ts_info = igb_get_ts_info,
+ .get_rxnfc = igb_get_rxnfc,
+ .set_rxnfc = igb_set_rxnfc,
+ .get_eee = igb_get_eee,
+ .set_eee = igb_set_eee,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
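Both new pairs of ethtool operations are reached through the standard SIOCETHTOOL ioctl: ETHTOOL_GRXFH/ETHTOOL_SRXFH for the RSS hash fields and ETHTOOL_GEEE/ETHTOOL_SEEE for EEE (ethtool --show-eee / --set-eee in sufficiently recent ethtool builds). The userspace sketch below is illustrative only (not part of the patch; the socket and interface name are assumed inputs) and mirrors what `ethtool -N <dev> rx-flow-hash udp4 sdfn` does, i.e. it asks igb_set_rss_hash_opt() to fold UDP ports into the RSS hash.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/ethtool.h>

	/* Illustrative helper (not from the patch): hash UDPv4 on ports too. */
	static int enable_udp4_port_hashing(int sock, const char *ifname)
	{
		struct ethtool_rxnfc nfc;
		struct ifreq ifr;

		memset(&nfc, 0, sizeof(nfc));
		nfc.cmd = ETHTOOL_SRXFH;
		nfc.flow_type = UDP_V4_FLOW;
		nfc.data = RXH_IP_SRC | RXH_IP_DST |	/* hash on addresses...  */
			   RXH_L4_B_0_1 | RXH_L4_B_2_3;	/* ...and on UDP ports   */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&nfc;

		return ioctl(sock, SIOCETHTOOL, &ifr);	/* -> igb_set_rxnfc() */
	}

As igb_set_rss_hash_opt() warns, enabling UDP port hashing can deliver fragmented UDP datagrams out of order across queues.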
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e1ceb37..7044aaa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -61,7 +61,7 @@
#define MAJ 4
#define MIN 0
-#define BUILD 1
+#define BUILD 17
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -534,31 +534,27 @@ rx_ring_summary:
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- pr_info("%s[0x%03X] %016llX %016llX -------"
- "--------- %p%s\n", "RWB", i,
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- buffer_info->skb, next_desc);
+ next_desc);
} else {
- pr_info("%s[0x%03X] %016llX %016llX %016llX"
- " %p%s\n", "R ", i,
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
- buffer_info->skb, next_desc);
+ next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->skb) {
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1, buffer_info->skb->data,
- IGB_RX_HDR_LEN, true);
+ buffer_info->dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
page_address(buffer_info->page) +
buffer_info->page_offset,
- PAGE_SIZE/2, true);
+ IGB_RX_BUFSZ, true);
}
}
}
@@ -656,80 +652,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
}
-static void igb_free_queues(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
- adapter->rx_ring[i] = NULL;
- }
- adapter->num_rx_queues = 0;
- adapter->num_tx_queues = 0;
-}
-
-/**
- * igb_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time.
- **/
-static int igb_alloc_queues(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
- /* For 82575, context index must be unique per ring. */
- if (adapter->hw.mac.type == e1000_82575)
- set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
- adapter->tx_ring[i] = ring;
- }
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
- /* set flag indicating ring supports SCTP checksum offload */
- if (adapter->hw.mac.type >= e1000_82576)
- set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-
- /*
- * On i350, i210, and i211, loopback VLAN packets
- * have the tag byte-swapped.
- * */
- if (adapter->hw.mac.type >= e1000_i350)
- set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
-
- adapter->rx_ring[i] = ring;
- }
-
- igb_cache_ring_register(adapter);
-
- return 0;
-
-err:
- igb_free_queues(adapter);
-
- return -ENOMEM;
-}
-
/**
* igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure
@@ -960,6 +882,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
}
/**
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+
+ /*
+ * igb_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
* igb_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
@@ -969,17 +920,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
**/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
- int v_idx;
+ int v_idx = adapter->num_q_vectors;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- adapter->q_vector[v_idx] = NULL;
- if (!q_vector)
- continue;
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
- }
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
}
/**
@@ -990,7 +938,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
*/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
- igb_free_queues(adapter);
igb_free_q_vectors(adapter);
igb_reset_interrupt_capability(adapter);
}
@@ -1001,7 +948,7 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
**/
-static int igb_set_interrupt_capability(struct igb_adapter *adapter)
+static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
int err;
int numvecs, i;
@@ -1038,7 +985,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
adapter->msix_entries,
numvecs);
if (err == 0)
- goto out;
+ return;
igb_reset_interrupt_capability(adapter);
@@ -1068,105 +1015,183 @@ msi_only:
adapter->num_q_vectors = 1;
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
-out:
- /* Notify the stack of the (possibly) reduced queue counts. */
- rtnl_lock();
- netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- err = netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
- rtnl_unlock();
- return err;
+}
+
+static void igb_add_ring(struct igb_ring *ring,
+ struct igb_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
}
/**
- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ int v_count, int v_idx,
+ int txr_count, int txr_idx,
+ int rxr_count, int rxr_idx)
{
struct igb_q_vector *q_vector;
- struct e1000_hw *hw = &adapter->hw;
- int v_idx;
+ struct igb_ring *ring;
+ int ring_count, size;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- q_vector = kzalloc(sizeof(struct igb_q_vector),
- GFP_KERNEL);
- if (!q_vector)
- goto err_out;
- q_vector->adapter = adapter;
- q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
- q_vector->itr_val = IGB_START_ITR;
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
- adapter->q_vector[v_idx] = q_vector;
+ /* igb only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igb_q_vector) +
+ (sizeof(struct igb_ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igb_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igb_add_ring(ring, &q_vector->tx);
+
+ /* For 82575, context index must be unique per ring. */
+ if (adapter->hw.mac.type == e1000_82575)
+ set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
}
- return 0;
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
-err_out:
- igb_free_q_vectors(adapter);
- return -ENOMEM;
-}
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
-static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* update q_vector Rx values */
+ igb_add_ring(ring, &q_vector->rx);
- q_vector->rx.ring = adapter->rx_ring[ring_idx];
- q_vector->rx.ring->q_vector = q_vector;
- q_vector->rx.count++;
- q_vector->itr_val = adapter->rx_itr_setting;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
-}
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /*
+ * On i350, i210, and i211, loopback VLAN packets
+ * have the tag byte-swapped.
+	 */
+ if (adapter->hw.mac.type >= e1000_i350)
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
- q_vector->tx.ring = adapter->tx_ring[ring_idx];
- q_vector->tx.ring->q_vector = q_vector;
- q_vector->tx.count++;
- q_vector->itr_val = adapter->tx_itr_setting;
- q_vector->tx.work_limit = adapter->tx_work_limit;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
}
+
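For reference, a minimal standalone sketch (not part of the patch) of the combined allocation introduced above: the q_vector and its rings now come from a single kzalloc, with the rings laid out immediately after the vector structure. The struct names below are simplified stand-ins, not the driver's real definitions.

#include <stdlib.h>

struct demo_ring { int queue_index; };

struct demo_q_vector {
	int itr_val;
	struct demo_ring ring[];	/* Tx ring first, then Rx ring, as above */
};

static struct demo_q_vector *demo_alloc_q_vector(int ring_count)
{
	size_t size = sizeof(struct demo_q_vector) +
		      ring_count * sizeof(struct demo_ring);

	/* stands in for kzalloc(size, GFP_KERNEL) in igb_alloc_q_vector() */
	return calloc(1, size);
}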
/**
- * igb_map_ring_to_vector - maps allocated queues to vectors
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * This function maps the recently allocated queues to vectors.
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
**/
-static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
- int i;
- int v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
- if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
- (adapter->num_q_vectors < adapter->num_tx_queues))
- return -ENOMEM;
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
- if (adapter->num_q_vectors >=
- (adapter->num_rx_queues + adapter->num_tx_queues)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
- } else {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- if (i < adapter->num_tx_queues)
- igb_map_tx_ring_to_vector(adapter, i, v_idx);
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
}
- for (; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
}
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
}
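A standalone sketch (numbers made up) of how the DIV_ROUND_UP loop above spreads rings over vectors once dedicated Rx vectors are no longer possible; with 4 vectors serving 4 Rx and 4 Tx rings, every vector ends up with one ring of each type.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, rxr_remaining = 4, txr_remaining = 4, v_idx;

	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		printf("vector %d: %d Rx, %d Tx\n", v_idx, rqpv, tqpv);
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
	}
	return 0;	/* prints "vector N: 1 Rx, 1 Tx" for N = 0..3 */
}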
/**
@@ -1179,9 +1204,7 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err;
- err = igb_set_interrupt_capability(adapter);
- if (err)
- return err;
+ igb_set_interrupt_capability(adapter);
err = igb_alloc_q_vectors(adapter);
if (err) {
@@ -1189,24 +1212,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
- err = igb_map_ring_to_vector(adapter);
- if (err) {
- dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
- goto err_map_queues;
- }
-
+ igb_cache_ring_register(adapter);
return 0;
-err_map_queues:
- igb_free_queues(adapter);
-err_alloc_queues:
- igb_free_q_vectors(adapter);
+
err_alloc_q_vectors:
igb_reset_interrupt_capability(adapter);
return err;
@@ -1229,11 +1238,11 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (!err)
goto request_done;
/* fall back to MSI */
+ igb_free_all_tx_resources(adapter);
+ igb_free_all_rx_resources(adapter);
igb_clear_interrupt_scheme(adapter);
if (!pci_enable_msi(pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
- igb_free_all_tx_resources(adapter);
- igb_free_all_rx_resources(adapter);
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
adapter->num_q_vectors = 1;
@@ -1243,13 +1252,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
"Unable to allocate memory for vectors\n");
goto request_done;
}
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(&pdev->dev,
- "Unable to allocate memory for queues\n");
- igb_free_q_vectors(adapter);
- goto request_done;
- }
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
}
@@ -1706,10 +1708,8 @@ void igb_reset(struct igb_adapter *adapter)
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
-#ifdef CONFIG_IGB_PTP
/* Re-enable PTP, where applicable. */
igb_ptp_reset(adapter);
-#endif /* CONFIG_IGB_PTP */
igb_get_phy_info(hw);
}
@@ -1783,58 +1783,34 @@ static const struct net_device_ops igb_netdev_ops = {
void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 major, build, patch, fw_version;
- u32 etrack_id;
-
- hw->nvm.ops.read(hw, 5, 1, &fw_version);
- if (adapter->hw.mac.type != e1000_i211) {
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
- etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
-
- /* combo image version needs to be found */
- hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) &&
- (comb_offset != IGB_NVM_VER_INVALID)) {
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
- + 1), 1, &comb_verh);
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
- 1, &comb_verl);
-
- /* Only display Option Rom if it exists and is valid */
- if ((comb_verh && comb_verl) &&
- ((comb_verh != IGB_NVM_VER_INVALID) &&
- (comb_verl != IGB_NVM_VER_INVALID))) {
- major = comb_verl >> IGB_COMB_VER_SHFT;
- build = (comb_verl << IGB_COMB_VER_SHFT) |
- (comb_verh >> IGB_COMB_VER_SHFT);
- patch = comb_verh & IGB_COMB_VER_MASK;
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d%d, 0x%08x, %d.%d.%d",
- (fw_version & IGB_MAJOR_MASK) >>
- IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >>
- IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK),
- etrack_id, major, build, patch);
- goto out;
- }
- }
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%d.%d%d, 0x%08x",
- (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK), etrack_id);
- } else {
+ struct e1000_fw_version fw;
+
+ igb_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i211:
snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%d.%d%d",
- (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK));
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+
+ default:
+		/* if option rom is valid, display its version too */
+ if (fw.or_valid) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ }
+ break;
}
-out:
return;
}
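A standalone sketch of the strings the rework above produces, using made-up NVM contents rather than values read from real hardware.

#include <stdio.h>

/* hypothetical firmware info, loosely modelled on struct e1000_fw_version */
struct demo_fw_version {
	unsigned int eep_major, eep_minor, etrack_id;
	unsigned int or_major, or_build, or_patch;
	int or_valid;
};

int main(void)
{
	char buf[32];
	struct demo_fw_version fw = { 1, 63, 0x800004f2, 2, 1, 5, 1 };

	if (fw.or_valid)
		snprintf(buf, sizeof(buf), "%d.%d, 0x%08x, %d.%d.%d",
			 fw.eep_major, fw.eep_minor, fw.etrack_id,
			 fw.or_major, fw.or_build, fw.or_patch);
	else
		snprintf(buf, sizeof(buf), "%d.%d, 0x%08x",
			 fw.eep_major, fw.eep_minor, fw.etrack_id);

	printf("%s\n", buf);	/* "1.63, 0x800004f2, 2.1.5" */
	return 0;
}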
@@ -2141,10 +2117,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
#endif
-#ifdef CONFIG_IGB_PTP
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
-#endif /* CONFIG_IGB_PTP */
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
@@ -2219,9 +2193,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
-#ifdef CONFIG_IGB_PTP
igb_ptp_stop(adapter);
-#endif /* CONFIG_IGB_PTP */
/*
* The watchdog timer may be rescheduled, so explicitly
@@ -2531,6 +2503,17 @@ static int __igb_open(struct net_device *netdev, bool resuming)
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
@@ -2560,6 +2543,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
return 0;
+err_set_queues:
+ igb_free_irq(adapter);
err_req_irq:
igb_release_hw_control(adapter);
igb_power_down_link(adapter);
@@ -2637,10 +2622,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2777,18 +2760,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
-
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -2893,18 +2874,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
/* Don't need to set TUOFL or IPOFL, they default to 1 */
wr32(E1000_RXCSUM, rxcsum);
- /*
- * Generate RSS hash based on TCP port numbers and/or
- * IPv4/v6 src and dst addresses since UDP cannot be
- * hashed reliably due to IP fragmentation
- */
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV6 |
E1000_MRQC_RSS_FIELD_IPV6_TCP |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
/* If VMDq is enabled then we set the appropriate mode for that, else
* we default to RSS so that an RSS hash is calculated per packet even
* if we are only using one queue */
@@ -3106,16 +3090,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
- srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
- srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-#ifdef CONFIG_IGB_PTP
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif /* CONFIG_IGB_PTP */
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3305,36 +3283,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
unsigned long size;
u16 i;
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
if (!rx_ring->rx_buffer_info)
return;
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- if (buffer_info->dma) {
- dma_unmap_single(rx_ring->dev,
- buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- }
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- if (buffer_info->page_dma) {
- dma_unmap_page(rx_ring->dev,
- buffer_info->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- buffer_info->page_offset = 0;
- }
+ if (!buffer_info->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(buffer_info->page);
+
+ buffer_info->page = NULL;
}
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3343,6 +3312,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -4028,6 +3998,9 @@ static int igb_tso(struct igb_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -4148,26 +4121,32 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}
-static __le32 igb_tx_cmd_type(u32 tx_flags)
+#define IGB_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
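IGB_SET_FLAG() translates a flag bit in _input into the corresponding hardware bit in _result using a multiply or divide rather than a branch; since _flag and _result are constants at the call sites the compiler can fold the scale factor. A standalone sketch with made-up bit positions:

#include <assert.h>

typedef unsigned int u32;

#define IGB_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

int main(void)
{
	u32 sw_flag = 0x00000002;	/* illustrative software flag bit */
	u32 hw_bit  = 0x00200000;	/* illustrative hardware control bit */
	u32 flags = sw_flag;

	/* flag <= result, so the bit is scaled up by result / flag */
	assert(IGB_SET_FLAG(flags, sw_flag, hw_bit) == hw_bit);
	/* flag not set in the input: nothing is carried over */
	assert(IGB_SET_FLAG(0, sw_flag, hw_bit) == 0);
	return 0;
}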
+static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_IFCS |
- E1000_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IGB_TX_FLAGS_VLAN)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
+ (E1000_ADVTXD_DCMD_VLE));
+
+ /* set segmentation bits for TSO */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
+ (E1000_ADVTXD_DCMD_TSE));
-#ifdef CONFIG_IGB_PTP
/* set timestamp bit if present */
- if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
- cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
-#endif /* CONFIG_IGB_PTP */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
+ (E1000_ADVTXD_MAC_TSTAMP));
- /* set segmentation bits for TSO */
- if (tx_flags & IGB_TX_FLAGS_TSO)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+ /* insert frame checksum */
+ cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
return cmd_type;
}
@@ -4178,19 +4157,19 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
{
u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring if any offload is enabled */
- if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
- test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
olinfo_status |= tx_ring->reg_idx << 4;
/* insert L4 checksum */
- if (tx_flags & IGB_TX_FLAGS_CSUM) {
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_CSUM,
+ (E1000_TXD_POPTS_TXSM << 8));
- /* insert IPv4 checksum */
- if (tx_flags & IGB_TX_FLAGS_IPV4)
- olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
- }
+ /* insert IPv4 checksum */
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_IPV4,
+ (E1000_TXD_POPTS_IXSM << 8));
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
@@ -4209,33 +4188,37 @@ static void igb_tx_map(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag;
dma_addr_t dma;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
- __le32 cmd_type;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
+ u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IGB_TX_DESC(tx_ring, i);
- igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
- cmd_type = igb_tx_cmd_type(tx_flags);
+ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
i++;
tx_desc++;
@@ -4243,18 +4226,18 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IGB_MAX_DATA_PER_TXD;
size -= IGB_MAX_DATA_PER_TXD;
- tx_desc->read.olinfo_status = 0;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
}
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
@@ -4262,32 +4245,22 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
+ size, DMA_TO_DEVICE);
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.olinfo_status = 0;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
-
- frag++;
}
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
- if (unlikely(skb->no_fcs))
- cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
/* set the timestamp */
first->time_stamp = jiffies;
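One note on the cmd_type ^ size writes above: the descriptor length bits are always clear in cmd_type, so XOR behaves exactly like OR there, and the same operator lets the cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS) line in igb_tx_cmd_type() drop the IFCS bit when FCS insertion is disabled. A standalone sketch with made-up bit values:

#include <assert.h>

int main(void)
{
	unsigned int cmd_type = 0x20300000;	/* control bits only, length bits clear */
	unsigned int size = 0x00003fff;		/* fits entirely in the length bits */
	unsigned int ifcs = 0x02000000;		/* stand-in for the IFCS control bit */

	/* XOR equals OR when the operands share no set bits */
	assert((cmd_type ^ size) == (cmd_type | size));

	/* XOR with an already-set bit clears it (the no_fcs case) */
	assert(((cmd_type | ifcs) ^ ifcs) == cmd_type);
	return 0;
}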
@@ -4372,9 +4345,7 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
-#ifdef CONFIG_IGB_PTP
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
-#endif /* CONFIG_IGB_PTP */
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
@@ -4397,7 +4368,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
-#ifdef CONFIG_IGB_PTP
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -4407,7 +4377,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
}
-#endif /* CONFIG_IGB_PTP */
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4467,10 +4436,11 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
}
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
@@ -4800,7 +4770,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -4811,7 +4780,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
wr32(E1000_EIMS, adapter->eims_other);
@@ -4851,45 +4819,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data)
}
#ifdef CONFIG_IGB_DCA
+static void igb_update_tx_dca(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
+ E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_DESC_DCA_EN;
+
+ wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+}
+
+static void igb_update_rx_dca(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
+ E1000_DCA_RXCTRL_DESC_DCA_EN;
+
+ wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+}
+
static void igb_update_dca(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
if (q_vector->cpu == cpu)
goto out_no_update;
- if (q_vector->tx.ring) {
- int q = q_vector->tx.ring->reg_idx;
- u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_TXCTRL_CPUID_SHIFT;
- }
- dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
- wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
- }
- if (q_vector->rx.ring) {
- int q = q_vector->rx.ring->reg_idx;
- u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_RXCTRL_CPUID_SHIFT;
- }
- dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
- wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
- }
+ if (q_vector->tx.ring)
+ igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
+
+ if (q_vector->rx.ring)
+ igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
+
q_vector->cpu = cpu;
out_no_update:
put_cpu();
@@ -5545,7 +5531,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -5556,7 +5541,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
@@ -5599,7 +5583,6 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -5610,7 +5593,6 @@ static irqreturn_t igb_intr(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
@@ -5840,6 +5822,181 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
return !!budget;
}
+/**
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *old_buff)
+{
+ struct igb_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
+ old_buff->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+}
+
+/**
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+
+ if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+ /*
+ * since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
+
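A standalone sketch of the half-page flip igb_add_rx_frag() performs when PAGE_SIZE < 8192: on 4 KB pages the 2 KB receive buffer is one half of the DMA-mapped page, and XOR-ing the offset alternates between the two halves. The constants below are stand-ins, not the driver's definitions.

#include <assert.h>

#define DEMO_PAGE_SIZE	4096u
#define DEMO_RX_BUFSZ	(DEMO_PAGE_SIZE / 2)	/* stand-in for IGB_RX_BUFSZ */

int main(void)
{
	unsigned int page_offset = 0;

	page_offset ^= DEMO_RX_BUFSZ;	/* first half handed to the stack, use second */
	assert(page_offset == 2048);

	page_offset ^= DEMO_RX_BUFSZ;	/* and flip back again on the next reuse */
	assert(page_offset == 0);
	return 0;
}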
+static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IGB_RX_HDR_LEN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -5889,224 +6046,386 @@ static inline void igb_rx_hash(struct igb_ring *ring,
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
-static void igb_rx_vlan(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it returns true
+ * indicating that this is in fact a non-EOP buffer and the caller
+ * should keep building the frame in progress.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
{
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
- u16 vid;
- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
- else
- vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ u32 ntc = rx_ring->next_to_clean + 1;
- __vlan_hwaccel_put_tag(skb, vid);
- }
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+ if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+ return false;
+
+ return true;
}
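The conditional reset of ntc above is the usual compare-instead-of-modulo ring wrap; a trivial standalone sketch:

#include <assert.h>

int main(void)
{
	unsigned int count = 256;		/* illustrative ring size */
	unsigned int next_to_clean = 255;	/* last descriptor in the ring */
	unsigned int ntc = next_to_clean + 1;

	/* same wrap as igb_is_non_eop(), avoiding a division */
	ntc = (ntc < count) ? ntc : 0;
	assert(ntc == 0);
	return 0;
}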
-static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
-{
- /* HW will not DMA in data larger than the given buffer, even if it
- * parses the (NFS, of course) header to be larger. In that case, it
- * fills the header buffer and spills the rest into the page.
+/**
+ * igb_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, and GRO offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int igb_get_headlen(unsigned char *data,
+ unsigned int max_len)
+{
+ union {
+ unsigned char *network;
+ /* l2 headers */
+ struct ethhdr *eth;
+ struct vlan_hdr *vlan;
+ /* l3 headers */
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ __be16 protocol;
+ u8 nexthdr = 0; /* default to not TCP */
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_len < ETH_HLEN)
+ return max_len;
+
+ /* initialize network frame pointer */
+ hdr.network = data;
+
+ /* set first protocol and move network header forward */
+ protocol = hdr.eth->h_proto;
+ hdr.network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
+ return max_len;
+
+ protocol = hdr.vlan->h_vlan_encapsulated_proto;
+ hdr.network += VLAN_HLEN;
+ }
+
+ /* handle L3 protocols */
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+ return max_len;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return hdr.network - data;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv4->protocol;
+ hdr.network += hlen;
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv6->nexthdr;
+ hdr.network += sizeof(struct ipv6hdr);
+ } else {
+ return hdr.network - data;
+ }
+
+ /* finally sort out TCP */
+ if (nexthdr == IPPROTO_TCP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+ return max_len;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return hdr.network - data;
+
+ hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
+ }
+
+ /*
+ * If everything has gone correctly hdr.network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
*/
- u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IGB_RX_HDR_LEN)
- hlen = IGB_RX_HDR_LEN;
- return hlen;
+ if ((hdr.network - data) < max_len)
+ return hdr.network - data;
+ else
+ return max_len;
}
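A worked example for the parser above, counting header sizes only rather than building a real packet: an 802.1Q-tagged IPv4/TCP frame with no IP or TCP options comes to 14 + 4 + 20 + 20 = 58 bytes, so igb_pull_tail() only pulls those 58 header bytes into the linear area instead of a full IGB_RX_HDR_LEN worth of data.

#include <assert.h>

int main(void)
{
	unsigned int eth_hlen = 14;	/* Ethernet header */
	unsigned int vlan_hlen = 4;	/* 802.1Q tag */
	unsigned int ip_hlen = 5 * 4;	/* ihl = 5, no IP options */
	unsigned int tcp_hlen = 5 * 4;	/* doff = 5, no TCP options */

	assert(eth_hlen + vlan_hlen + ip_hlen + tcp_hlen == 58);
	return 0;
}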
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+/**
+ * igb_pull_tail - igb specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an igb specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void igb_pull_tail(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- struct igb_ring *rx_ring = q_vector->rx.ring;
- union e1000_adv_rx_desc *rx_desc;
- const int current_node = numa_node_id();
- unsigned int total_bytes = 0, total_packets = 0;
- u16 cleaned_count = igb_desc_unused(rx_ring);
- u16 i = rx_ring->next_to_clean;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- rx_desc = IGB_RX_DESC(rx_ring, i);
+ /*
+ * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
- struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- struct sk_buff *skb = buffer_info->skb;
- union e1000_adv_rx_desc *next_rxd;
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ /* retrieve timestamp from buffer */
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- buffer_info->skb = NULL;
- prefetch(skb->data);
+ /* update pointers to remove timestamp header */
+ skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
+ frag->page_offset += IGB_TS_HDR_LEN;
+ skb->data_len -= IGB_TS_HDR_LEN;
+ skb->len -= IGB_TS_HDR_LEN;
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /* move va to start of packet data */
+ va += IGB_TS_HDR_LEN;
+ }
- next_rxd = IGB_RX_DESC(rx_ring, i);
- prefetch(next_rxd);
+ /*
+	 * we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if skb->len is less than 60, so that skb_pad can work.
+ */
+ pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (!skb_is_nonlinear(skb)) {
- __skb_put(skb, igb_get_hlen(rx_desc));
- dma_unmap_single(rx_ring->dev, buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
}
+ }
- if (rx_desc->wb.upper.length) {
- u16 length = le16_to_cpu(rx_desc->wb.upper.length);
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ igb_pull_tail(rx_ring, rx_desc, skb);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- buffer_info->page,
- buffer_info->page_offset,
- length);
+ /* if skb_pad returns an error the skb was freed */
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += PAGE_SIZE / 2;
+ if (skb_pad(skb, pad_len))
+ return true;
+ __skb_put(skb, pad_len);
+ }
- if ((page_count(buffer_info->page) != 1) ||
- (page_to_nid(buffer_info->page) != current_node))
- buffer_info->page = NULL;
- else
- get_page(buffer_info->page);
+ return false;
+}
- dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
+/**
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
- struct igb_rx_buffer *next_buffer;
- next_buffer = &rx_ring->rx_buffer_info[i];
- buffer_info->skb = next_buffer->skb;
- buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- goto next_desc;
- }
+ igb_rx_hash(rx_ring, rx_desc, skb);
- if (unlikely((igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK))
- && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
- dev_kfree_skb_any(skb);
- goto next_desc;
- }
+ igb_rx_checksum(rx_ring, rx_desc, skb);
-#ifdef CONFIG_IGB_PTP
- igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif /* CONFIG_IGB_PTP */
- igb_rx_hash(rx_ring, rx_desc, skb);
- igb_rx_checksum(rx_ring, rx_desc, skb);
- igb_rx_vlan(rx_ring, rx_desc, skb);
+ igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
- total_bytes += skb->len;
- total_packets++;
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+ u16 vid;
+ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
- napi_gro_receive(&q_vector->napi, skb);
+ skb_record_rx_queue(skb, rx_ring->queue_index);
- budget--;
-next_desc:
- if (!budget)
- break;
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+ do {
+ union e1000_adv_rx_desc *rx_desc;
- cleaned_count++;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
igb_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- /* use prefetched values */
- rx_desc = next_rxd;
- }
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
- rx_ring->next_to_clean = i;
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
- if (cleaned_count)
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ /* retrieve a buffer from the ring */
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
- return !!budget;
-}
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
-static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
- struct igb_rx_buffer *bi)
-{
- struct sk_buff *skb = bi->skb;
- dma_addr_t dma = bi->dma;
+ cleaned_count++;
- if (dma)
- return true;
+ /* fetch next buffer in frame if non-eop */
+ if (igb_is_non_eop(rx_ring, rx_desc))
+ continue;
- if (likely(!skb)) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IGB_RX_HDR_LEN);
- bi->skb = skb;
- if (!skb) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
+ /* verify the packet layout is correct */
+ if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
}
- /* initialize skb for ring */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- }
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
- dma = dma_map_single(rx_ring->dev, skb->data,
- IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
- if (dma_mapping_error(rx_ring->dev, dma)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ napi_gro_receive(&q_vector->napi, skb);
- bi->dma = dma;
- return true;
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ u64_stats_update_end(&rx_ring->rx_syncp);
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return (total_packets < budget);
}
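With this rework the return value means the ring was fully cleaned within the budget. igb_poll() is not shown in this hunk, so the routine below is only an illustrative sketch of how such a result is usually consumed by a NAPI poll handler; the driver's real poll routine also handles DCA and interrupt re-enabling.

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector =
		container_of(napi, struct igb_q_vector, napi);
	bool clean_complete = true;

	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* work remains: ask to be polled again with a fresh budget */
	if (!clean_complete)
		return budget;

	/* all work done: leave polling mode */
	napi_complete(napi);
	return 0;
}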
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t page_dma = bi->page_dma;
- unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+ dma_addr_t dma;
- if (page_dma)
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
return true;
- if (!page) {
- page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
- bi->page = page;
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ /* alloc new page for storage */
+ page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
}
- page_dma = dma_map_page(rx_ring->dev, page,
- page_offset, PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /*
+ * if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
- if (dma_mapping_error(rx_ring->dev, page_dma)) {
rx_ring->rx_stats.alloc_failed++;
return false;
}
- bi->page_dma = page_dma;
- bi->page_offset = page_offset;
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
return true;
}
@@ -6120,22 +6439,23 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
rx_desc = IGB_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
- while (cleaned_count--) {
- if (!igb_alloc_mapped_skb(rx_ring, bi))
- break;
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info. */
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-
+ do {
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ /*
+ * Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -6148,17 +6468,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/* clear the hdr_addr for the next_to_use descriptor */
rx_desc->read.hdr_addr = 0;
- }
+
+ cleaned_count--;
+ } while (cleaned_count);
i += rx_ring->count;
if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
rx_ring->next_to_use = i;
- /* Force memory writes to complete before letting h/w
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /*
+ * Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, rx_ring->tail);
}
@@ -6207,10 +6535,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
-#ifdef CONFIG_IGB_PTP
case SIOCSHWTSTAMP:
return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
-#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
@@ -6492,7 +6818,9 @@ static int igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
if (netdev->flags & IFF_UP) {
+ rtnl_lock();
err = __igb_open(netdev, true);
+ rtnl_unlock();
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ee21445..aa10f69 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
}
-void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
+/**
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame. The value is stored in little endian format starting on
+ * byte 8.
+ */
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb)
+{
+ __le64 *regval = (__le64 *)va;
+
+ /*
+ * The timestamp is recorded in little endian format.
+ * DWORD: 0 1 2 3
+ * Field: Reserved Reserved SYSTIML SYSTIMH
+ */
+ igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[1]));
+}
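A standalone sketch of the buffer layout igb_ptp_rx_pktstamp() consumes: the prepended timestamp header is 16 bytes, with the SYSTIML/SYSTIMH value occupying the second 64-bit word. The counter value below is made up.

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	unsigned char buf[16] = { 0 };			/* prepended timestamp header */
	uint64_t systim = 0x0000001122334455ULL;	/* made-up SYSTIM counter */
	uint64_t regval[2];

	memcpy(buf + 8, &systim, sizeof(systim));	/* bytes 8..15 hold the stamp */

	memcpy(regval, buf, sizeof(regval));
	/* on a little-endian host this is what le64_to_cpu(regval[1]) yields */
	assert(regval[1] == systim);
	return 0;
}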
+
+/**
+ * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
- if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
- E1000_RXDADV_STAT_TS))
- return;
-
/*
* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
* If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- u32 *stamp = (u32 *)skb->data;
- regval = le32_to_cpu(*(stamp + 2));
- regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
- skb_pull(skb, IGB_TS_HDR_LEN);
- } else {
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
- }
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0ac11f5..4051ec4 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -184,6 +184,13 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->page_offset,
PAGE_SIZE / 2,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ buffer_info->page_dma)) {
+ __free_page(buffer_info->page);
+ buffer_info->page = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ break;
+ }
}
if (!buffer_info->skb) {
@@ -197,6 +204,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bufsz,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ goto no_buffers;
+ }
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 89f40e5..f3a632b 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,10 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
-ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 30efc9f..7ff4c4f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -36,11 +36,9 @@
#include <linux/aer.h>
#include <linux/if_vlan.h>
-#ifdef CONFIG_IXGBE_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IXGBE_PTP */
#include "ixgbe_type.h"
#include "ixgbe_common.h"
@@ -135,6 +133,7 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
+ unsigned int vf_api;
};
struct vf_macvlans {
@@ -482,7 +481,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
/* Tx fast path data */
@@ -571,7 +570,6 @@ struct ixgbe_adapter {
u32 interrupt_event;
u32 led_reg;
-#ifdef CONFIG_IXGBE_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
unsigned long last_overflow_check;
@@ -580,8 +578,6 @@ struct ixgbe_adapter {
struct timecounter tc;
int rx_hwtstamp_filter;
u32 base_incval;
- u32 cycle_speed;
-#endif /* CONFIG_IXGBE_PTP */
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -600,6 +596,8 @@ struct ixgbe_adapter {
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/
+
+ u8 default_up;
};
struct ixgbe_fdir_filter {
@@ -691,6 +689,7 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id);
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
+extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -739,7 +738,6 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
-#ifdef CONFIG_IXGBE_PTP
extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
@@ -751,7 +749,7 @@ extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
-#endif /* CONFIG_IXGBE_PTP */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1077cb2..e75f5a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -62,7 +62,6 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -99,9 +98,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
- u32 reg_anlp1 = 0;
- u32 i = 0;
u16 list_offset, data_offset, data_value;
+ bool got_lock = false;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -137,28 +135,36 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
- /* Now restart DSP by setting Restart_AN and clearing LMS */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
- IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
- IXGBE_AUTOC_AN_RESTART));
-
- /* Wait for AN to leave state 0 */
- for (i = 0; i < 10; i++) {
- usleep_range(4000, 8000);
- reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
- break;
+ /* Need SW/FW semaphore around AUTOC writes if LESM on,
+ * likewise reset_pipeline requires lock as it also writes
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto setup_sfp_out;
+
+ got_lock = true;
+ }
+
+ /* Restart DSP and set SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
+
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock) {
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ got_lock = false;
}
- if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
- hw_dbg(hw, "sfp module setup not complete\n");
+
+ if (ret_val) {
+ hw_dbg(hw, " sfp module setup not complete\n");
ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
goto setup_sfp_out;
}
-
- /* Restart DSP by setting Restart_AN and return to SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
- IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@@ -394,14 +400,26 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
s32 status = 0;
+ bool got_lock = false;
+
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status)
+ goto out;
+
+ got_lock = true;
+ }
/* Restart link */
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -425,6 +443,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
/* Add delay to filter out noises during initial link setup */
msleep(50);
+out:
return status;
}
@@ -779,6 +798,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ bool got_lock = false;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -836,9 +856,26 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is on,
+ * likewise reset_pipeline requires us to hold this lock as
+ * it also writes to AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != 0)
+ goto out;
+
+ got_lock = true;
+ }
+
/* Restart link */
- autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -994,9 +1031,28 @@ mac_reset_top:
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
- if (autoc != hw->mac.orig_autoc)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
- IXGBE_AUTOC_AN_RESTART));
+ if (autoc != hw->mac.orig_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is
+ * on, likewise reset_pipeline requires us to hold
+ * this lock as it also writes to AUTOC.
+ */
+ bool got_lock = false;
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status)
+ goto reset_hw_out;
+
+ got_lock = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ }
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1983,7 +2039,7 @@ fw_version_out:
* Returns true if the LESM FW module is present and enabled. Otherwise
* returns false. Smart Speed must be disabled if LESM FW module is enabled.
**/
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
bool lesm_enabled = false;
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2080,6 +2136,50 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
return ret_val;
}
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset. Note - We must hold the SW/FW semaphore before writing
+ * to AUTOC, so this function assumes the semaphore is held.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+ s32 i, autoc_reg, ret_val;
+ s32 anlp1_reg = 0;
+
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+
+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ usleep_range(4000, 8000);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+
+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ hw_dbg(hw, "auto negotiation not completed\n");
+ ret_val = IXGBE_ERR_RESET_FAILED;
+ goto reset_pipeline_out;
+ }
+
+ ret_val = 0;
+
+reset_pipeline_out:
+ /* Write AUTOC register with original LMS field and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
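Editor's note: the recurring pattern this file gains is "take the MAC CSR SW/FW semaphore when the LESM firmware agent is active, write AUTOC, run ixgbe_reset_pipeline_82599(), release the semaphore". A condensed sketch of that sequence is below; the helper name write_autoc_locked is illustrative, while the ixgbe symbols are the ones introduced or exported by this patch.

/* Hedged sketch of the AUTOC-write-under-semaphore pattern used above. */
static s32 write_autoc_locked(struct ixgbe_hw *hw, u32 autoc)
{
	bool got_lock = false;
	s32 status = 0;

	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status)
			return status;
		got_lock = true;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	status = ixgbe_reset_pipeline_82599(hw);	/* also writes AUTOC */

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return status;
}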
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index dbf37e4..8f285ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -90,6 +90,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
+ bool got_lock = false;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -210,8 +211,29 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
*
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+	 * LESM is on, likewise reset_pipeline requires the lock as
+ * it also writes AUTOC.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(ixgbe_device_supports_autoneg_fc(hw) == 0)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
@@ -1778,8 +1800,7 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
else if (IXGBE_IS_BROADCAST(mac_addr))
status = IXGBE_ERR_INVALID_MAC_ADDR;
/* Reject the zero address */
- else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+ else if (is_zero_ether_addr(mac_addr))
status = IXGBE_ERR_INVALID_MAC_ADDR;
return status;
@@ -2617,6 +2638,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = 0;
/*
* Link must be up to auto-blink the LEDs;
@@ -2625,10 +2647,28 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ bool got_lock = false;
+
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
IXGBE_WRITE_FLUSH(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
usleep_range(10000, 20000);
}
@@ -2637,7 +2677,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return 0;
+out:
+ return ret_val;
}
/**
@@ -2649,18 +2690,40 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = 0;
+ bool got_lock = false;
+
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return 0;
+out:
+ return ret_val;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d813d11..587db47 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -107,6 +107,7 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 116f0e9..a545728 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -887,24 +887,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+ struct ixgbe_ring *temp_ring;
int i, err = 0;
u32 new_rx_count, new_tx_count;
- bool need_update = false;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
- new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
- new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ IXGBE_MIN_TXD, IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring[0]->count) &&
- (new_rx_count == adapter->rx_ring[0]->count)) {
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count)) {
/* nothing to do */
return 0;
}
@@ -922,81 +921,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
- temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
- if (!temp_tx_ring) {
+ /* allocate temporary buffer to store rings in */
+ i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+ temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
+
+ if (!temp_ring) {
err = -ENOMEM;
goto clear_reset;
}
+ ixgbe_down(adapter);
+
+ /*
+ * Setup new Tx resources and free the old Tx resources in that order.
+ * We can then assign the new resources to the rings via a memcpy.
+ * The advantage to this approach is that we are guaranteed to still
+ * have resources even in the case of an allocation failure.
+ */
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
sizeof(struct ixgbe_ring));
- temp_tx_ring[i].count = new_tx_count;
- err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
+
+ temp_ring[i].count = new_tx_count;
+ err = ixgbe_setup_tx_resources(&temp_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_tx_resources(&temp_tx_ring[i]);
+ ixgbe_free_tx_resources(&temp_ring[i]);
}
- goto clear_reset;
+ goto err_setup;
}
}
- need_update = true;
- }
- temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
- if (!temp_rx_ring) {
- err = -ENOMEM;
- goto err_setup;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
+
+ adapter->tx_ring_count = new_tx_count;
}
+ /* Repeat the process for the Rx rings if needed */
if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
- temp_rx_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
+
+ temp_ring[i].count = new_rx_count;
+ err = ixgbe_setup_rx_resources(&temp_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_rx_resources(&temp_rx_ring[i]);
+ ixgbe_free_rx_resources(&temp_ring[i]);
}
goto err_setup;
}
+
}
- need_update = true;
- }
- /* if rings need to be updated, here's the place to do it in one shot */
- if (need_update) {
- ixgbe_down(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
- /* tx */
- if (new_tx_count != adapter->tx_ring_count) {
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbe_free_tx_resources(adapter->tx_ring[i]);
- memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
- sizeof(struct ixgbe_ring));
- }
- adapter->tx_ring_count = new_tx_count;
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct ixgbe_ring));
}
- /* rx */
- if (new_rx_count != adapter->rx_ring_count) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbe_free_rx_resources(adapter->rx_ring[i]);
- memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
- sizeof(struct ixgbe_ring));
- }
- adapter->rx_ring_count = new_rx_count;
- }
- ixgbe_up(adapter);
+ adapter->rx_ring_count = new_rx_count;
}
- vfree(temp_rx_ring);
err_setup:
- vfree(temp_tx_ring);
+ ixgbe_up(adapter);
+ vfree(temp_ring);
clear_reset:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
@@ -2669,7 +2667,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
struct ixgbe_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IXGBE_PTP
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -2695,7 +2692,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
-#endif /* CONFIG_IXGBE_PTP */
default:
return ethtool_op_get_ts_info(dev, info);
break;
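Editor's note: the rewritten ixgbe_set_ringparam() allocates one scratch array sized for the larger of the two ring sets, builds the new resources into the scratch copies first, and only then frees the old rings and memcpys the scratch entries over — so an allocation failure leaves the interface with its original, still-working rings. An outline of that strategy for the Tx side only, as a hedged sketch; resize_tx_rings and new_count are illustrative names, the ixgbe_setup/free_tx_resources calls match the driver API used in the hunk.

/* Hedged outline of the "build new before freeing old" resize strategy. */
static int resize_tx_rings(struct ixgbe_adapter *adapter, u32 new_count)
{
	struct ixgbe_ring *temp;
	int i, err;

	temp = vmalloc(adapter->num_tx_queues * sizeof(*temp));
	if (!temp)
		return -ENOMEM;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		memcpy(&temp[i], adapter->tx_ring[i], sizeof(*temp));
		temp[i].count = new_count;
		err = ixgbe_setup_tx_resources(&temp[i]);
		if (err)
			goto undo;	/* old rings are still intact */
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ixgbe_free_tx_resources(adapter->tx_ring[i]);
		memcpy(adapter->tx_ring[i], &temp[i], sizeof(*temp));
	}
	vfree(temp);
	return 0;

undo:
	while (i--)
		ixgbe_free_tx_resources(&temp[i]);
	vfree(temp);
	return err;
}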
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ae73ef1..252850d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
return -EINVAL;
e_info(drv, "Enabling FCoE offload features.\n");
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
+
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 17ecbce..8c74f73 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* setup affinity mask and node */
if (cpu != -1)
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
- else
- cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
q_vector->numa_node = node;
+#ifdef CONFIG_IXGBE_DCA
+ /* initialize CPU for DCA */
+ q_vector->cpu = -1;
+
+#endif
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
@@ -821,6 +824,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* initialize pointer to rings */
ring = q_vector->ring;
+	/* initialize ITR */
+ if (txr_count && !rxr_count) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
+ }
+
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fa3d552..38fc186 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
@@ -355,13 +356,37 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Transmit Descriptor Formats
*
- * Advanced Transmit Descriptor
+ * 82598 Advanced Transmit Descriptor
* +--------------------------------------------------------------+
* 0 | Buffer Address [63:0] |
* +--------------------------------------------------------------+
- * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
* +--------------------------------------------------------------+
* 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * 82598 Advanced Transmit Descriptor (Write-Back Format)
+ * +--------------------------------------------------------------+
+ * 0 | RSV [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | RSV | STA | NXTSEQ |
+ * +--------------------------------------------------------------+
+ * 63 36 35 32 31 0
+ *
+ * 82599+ Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0
+ *
+ * 82599+ Advanced Transmit Descriptor (Write-Back Format)
+ * +--------------------------------------------------------------+
+ * 0 | RSV [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | RSV | STA | RSV |
+ * +--------------------------------------------------------------+
+ * 63 36 35 32 31 0
*/
for (n = 0; n < adapter->num_tx_queues; n++) {
@@ -422,7 +447,9 @@ rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
- /* Advanced Receive Descriptor (Read) Format
+ /* Receive Descriptor Formats
+ *
+ * 82598 Advanced Receive Descriptor (Read) Format
* 63 1 0
* +-----------------------------------------------------+
* 0 | Packet Buffer Address [63:1] |A0/NSE|
@@ -431,17 +458,40 @@ rx_ring_summary:
* +-----------------------------------------------------+
*
*
- * Advanced Receive Descriptor (Write-Back) Format
+ * 82598 Advanced Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 30 21 20 16 15 4 3 0
* +------------------------------------------------------+
- * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
- * | Checksum Ident | | | | Type | Type |
+ * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS |
+ * | Packet | IP | | | | Type | Type |
+ * | Checksum | Ident | | | | | |
* +------------------------------------------------------+
* 8 | VLAN Tag | Length | Extended Error | Extended Status |
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
+ *
+ * 82599+ Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * 82599+ Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS |
+ * |/ RTT / PCoE_PARAM | | | CNT | Type | Type |
+ * |/ Flow Dir Flt ID | | | | | |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
*/
+
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info("------------------------------------\n");
@@ -791,10 +841,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-#endif
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -967,7 +1015,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
* which will cause the DCA tag to be cleared.
*/
rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
- IXGBE_DCA_RXCTRL_DATA_DCA_EN |
IXGBE_DCA_RXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
@@ -1244,6 +1291,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
struct vlan_hdr *vlan;
/* l3 headers */
struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
} hdr;
__be16 protocol;
u8 nexthdr = 0; /* default to not TCP */
@@ -1284,6 +1332,13 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
/* record next protocol */
nexthdr = hdr.ipv4->protocol;
hdr.network += hlen;
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv6->nexthdr;
+ hdr.network += sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
} else if (protocol == __constant_htons(ETH_P_FCOE)) {
if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
@@ -1294,7 +1349,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
return hdr.network - data;
}
- /* finally sort out TCP */
+ /* finally sort out TCP/UDP */
if (nexthdr == IPPROTO_TCP) {
if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
return max_len;
@@ -1307,6 +1362,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
return hdr.network - data;
hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
}
/*
@@ -1369,9 +1429,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
-#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
-#endif
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1781,7 +1839,7 @@ dma_sync:
**/
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
- int budget)
+ const int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
@@ -1832,7 +1890,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
/* populate checksum, timestamp, VLAN, and protocol */
ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
@@ -1865,8 +1922,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_rx_skb(q_vector, skb);
/* update budget accounting */
- budget--;
- } while (likely(budget));
+ total_rx_packets++;
+ } while (likely(total_rx_packets < budget));
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1878,7 +1935,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (cleaned_count)
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
- return !!budget;
+ return (total_rx_packets < budget);
}
/**
@@ -1914,20 +1971,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
- if (q_vector->tx.ring && !q_vector->rx.ring) {
- /* tx only vector */
- if (adapter->tx_itr_setting == 1)
- q_vector->itr = IXGBE_10K_ITR;
- else
- q_vector->itr = adapter->tx_itr_setting;
- } else {
- /* rx or rx/tx vector */
- if (adapter->rx_itr_setting == 1)
- q_vector->itr = IXGBE_20K_ITR;
- else
- q_vector->itr = adapter->rx_itr_setting;
- }
-
ixgbe_write_eitr(q_vector);
}
@@ -2324,10 +2367,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
break;
}
-#ifdef CONFIG_IXGBE_PTP
if (adapter->hw.mac.type == ixgbe_mac_X540)
mask |= IXGBE_EIMS_TIMESYNC;
-#endif
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
@@ -2393,10 +2434,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
/* re-enable the original interrupt state, no lsc, no queues */
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2588,10 +2627,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
}
ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
/* would disable interrupts here but EIAM disabled it */
napi_schedule(&q_vector->napi);
@@ -2699,12 +2736,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- /* rx/tx vector */
- if (adapter->rx_itr_setting == 1)
- q_vector->itr = IXGBE_20K_ITR;
- else
- q_vector->itr = adapter->rx_itr_setting;
-
ixgbe_write_eitr(q_vector);
ixgbe_set_ivar(adapter, 0, 0, 0);
@@ -3211,7 +3242,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
@@ -3234,8 +3264,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
- /* enable Tx loopback for VF/PF communication */
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
@@ -3263,6 +3291,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif /* IXGBE_FCOE */
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -3271,9 +3304,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
}
- /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
- max_frame += VLAN_HLEN;
-
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -4072,11 +4102,8 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
else
ixgbe_configure_msi_and_legacy(adapter);
- /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.enable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* enable the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
clear_bit(__IXGBE_DOWN, &adapter->state);
@@ -4192,6 +4219,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
+
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_reset(adapter);
}
/**
@@ -4393,11 +4423,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
- /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.disable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* power down the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
ixgbe_clean_all_tx_rings(adapter);
@@ -4828,14 +4855,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
/*
- * For 82599EB we cannot allow PF to change MTU greater than 1500
- * in SR-IOV mode as it may cause buffer overruns in guest VFs that
- * don't allocate and chain buffers correctly.
+ * For 82599EB we cannot allow legacy VFs to enable their receive
+ * paths when MTU greater than 1500 is configured. So display a
+ * warning that legacy VFs will be disabled.
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
- return -EINVAL;
+ e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -4901,6 +4928,8 @@ static int ixgbe_open(struct net_device *netdev)
if (err)
goto err_set_queues;
+ ixgbe_ptp_init(adapter);
+
ixgbe_up_complete(adapter);
return 0;
@@ -4932,6 +4961,8 @@ static int ixgbe_close(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ ixgbe_ptp_stop(adapter);
+
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
@@ -5022,14 +5053,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (wufc) {
ixgbe_set_rx_mode(netdev);
- /*
- * enable the optics for both mult-speed fiber and
- * 82599 SFP+ fiber as we can WoL.
- */
- if (hw->mac.ops.enable_tx_laser &&
- (hw->phy.multispeed_fiber ||
- (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
- hw->mac.type == ixgbe_mac_82599EB)))
+ /* enable the optics for 82599 SFP+ fiber as we can WoL */
+ if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
/* turn on all-multi mode if wake on multicast is enabled */
@@ -5442,6 +5467,23 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
adapter->link_speed = link_speed;
}
+static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+ struct net_device *netdev = adapter->netdev;
+ struct dcb_app app = {
+ .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+ .protocol = 0,
+ };
+ u8 up = 0;
+
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
+ up = dcb_ieee_getapp_mask(netdev, &app);
+
+ adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
+#endif
+}
+
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -5482,9 +5524,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
break;
}
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5501,6 +5542,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ /* update the default user priority for VFs */
+ ixgbe_update_default_up(adapter);
+
/* ping all the active vfs to let them know link has changed */
ixgbe_ping_all_vfs(adapter);
}
@@ -5526,9 +5570,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Down\n");
netif_carrier_off(netdev);
@@ -5833,9 +5876,7 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_watchdog_subtask(adapter);
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
-#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_overflow_check(adapter);
-#endif
ixgbe_service_event_complete(adapter);
}
@@ -5988,10 +6029,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-#ifdef CONFIG_IXGBE_PTP
if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
-#endif
/* set segmentation enable bits for TSO/FSO */
#ifdef IXGBE_FCOE
@@ -6393,12 +6432,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
}
-#endif
#ifdef CONFIG_PCI_IOV
/*
@@ -6485,6 +6522,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
}
tx_ring = adapter->tx_ring[skb->queue_mapping];
@@ -6547,10 +6585,8 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
switch (cmd) {
-#ifdef CONFIG_IXGBE_PTP
case SIOCSHWTSTAMP:
return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
-#endif
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -6916,7 +6952,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EINVAL;
}
- if (is_unicast_ether_addr(addr)) {
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
if (netdev_uc_count(dev) < rar_uc_entries)
@@ -6974,6 +7010,59 @@ static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
return idx;
}
+static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+ u32 reg = 0;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA)
+ reg = 0;
+ else if (mode == BRIDGE_MODE_VEB)
+ reg = IXGBE_PFDTXGSWC_VT_LBEN;
+ else
+ return -EINVAL;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
+
+ e_info(drv, "enabling bridge mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ }
+
+ return 0;
+}
+
+static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ u16 mode;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return 0;
+
+ if (IXGBE_READ_REG(&adapter->hw, IXGBE_PFDTXGSWC) & 1)
+ mode = BRIDGE_MODE_VEB;
+ else
+ mode = BRIDGE_MODE_VEPA;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+}
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -7013,6 +7102,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_add = ixgbe_ndo_fdb_add,
.ndo_fdb_del = ixgbe_ndo_fdb_del,
.ndo_fdb_dump = ixgbe_ndo_fdb_dump,
+ .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
+ .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
};
/**
@@ -7042,6 +7133,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
break;
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
+ case IXGBE_SUBDEV_ID_82599_ECNA_DP:
is_wol_supported = 1;
break;
}
@@ -7364,10 +7456,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_init(adapter);
-#endif /* CONFIG_IXGBE_PTP*/
-
/* save off EEPROM version number */
hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7420,11 +7508,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.disable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* power down the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
/* carrier off reporting is important to ethtool even BEFORE open */
@@ -7505,9 +7590,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_DOWN, &adapter->state);
cancel_work_sync(&adapter->service_task);
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_stop(adapter);
-#endif
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 310bdd9..42dd65e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -62,12 +62,39 @@
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * Each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
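Editor's note: the new enum supports mailbox API negotiation — the VF sends IXGBE_VF_API_NEGOTIATE with the version it wants, and the PF records it per VF so later handlers (such as ixgbe_set_vf_lpe above) can branch on it. A rough sketch of a PF-side handler is below; it is a guess at the shape only, since the actual handler is not part of the hunks shown, and the function name negotiate_vf_api is illustrative. The vf_api field and enum values are the ones added by this patch.

/* Hedged sketch of a PF-side handler for IXGBE_VF_API_NEGOTIATE. */
static int negotiate_vf_api(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];		/* requested ixgbe_pfvf_api_rev value */

	switch (api) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
		adapter->vfinfo[vf].vf_api = api;	/* remember per VF */
		return 0;
	default:
		adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
		return -1;		/* NACK unknown versions */
	}
}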
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d929131..01d99af 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -387,6 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
struct ixgbe_hw *hw = &adapter->hw;
struct ptp_clock_event event;
+ event.type = PTP_CLOCK_PPS;
+
+ /* this check is necessary in case the interrupt was enabled via some
+ * alternative means (ex. debug_fs). Better to check here than
+ * everywhere that calls this function.
+ */
+ if (!adapter->ptp_clock)
+ return;
+
switch (hw->mac.type) {
case ixgbe_mac_X540:
ptp_clock_event(adapter->ptp_clock, &event);
@@ -411,7 +420,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
struct timespec ts;
- if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
+ if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
(elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
adapter->last_overflow_check = jiffies;
@@ -554,12 +563,14 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
adapter = q_vector->adapter;
hw = &adapter->hw;
+ if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ return;
+
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
/* Check if we have a valid timestamp and make sure the skb should
* have been timestamped */
- if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
- !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
return;
/*
@@ -759,58 +770,20 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
* ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
* @adapter: pointer to the adapter structure
*
- * this function initializes the timecounter and cyclecounter
- * structures for use in generated a ns counter from the arbitrary
- * fixed point cycles registers in the hardware.
- *
- * A change in link speed impacts the frequency of the DMA clock on
- * the device, which is used to generate the cycle counter
- * registers. Therefor this function is called whenever the link speed
- * changes.
- *
- * This function also turns on the SDP pin for clock out feature (X540
- * only), because this is where the shift is first calculated.
+ * This function should be called to set the proper values for the TIMINCA
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TIMINCA value is necessary,
+ * such as during initialization or when the link speed changes.
*/
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 incval = 0;
- u32 timinca = 0;
u32 shift = 0;
- u32 cycle_speed;
unsigned long flags;
/**
- * Determine what speed we need to set the cyclecounter
- * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
- * unknown speeds as 10Gb. (Hence why we can't just copy the
- * link_speed.
- */
- switch (adapter->link_speed) {
- case IXGBE_LINK_SPEED_100_FULL:
- case IXGBE_LINK_SPEED_1GB_FULL:
- case IXGBE_LINK_SPEED_10GB_FULL:
- cycle_speed = adapter->link_speed;
- break;
- default:
- /* cycle speed should be 10Gb when there is no link */
- cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
- break;
- }
-
- /*
- * grab the current TIMINCA value from the register so that it can be
- * double checked. If the register value has been cleared, it must be
- * reset to the correct value for generating a cyclecounter. If
- * TIMINCA is zero, the SYSTIME registers do not increment at all.
- */
- timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
-
- /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
- if (adapter->cycle_speed == cycle_speed && timinca)
- return;
-
- /**
* Scale the NIC cycle counter by a large factor so that
* relatively small corrections to the frequency can be added
* or subtracted. The drawbacks of a large factor include
@@ -819,8 +792,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
* to nanoseconds using only a multiplier and a right-shift,
* and (c) the value must fit within the timinca register space
* => math based on internal DMA clock rate and available bits
+ *
+ * Note that when there is no link, internal DMA clock is same as when
+ * link speed is 10Gb. Set the registers correctly even when link is
+ * down to preserve the clock setting
*/
- switch (cycle_speed) {
+ switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
incval = IXGBE_INCVAL_100;
shift = IXGBE_INCVAL_SHIFT_100;
@@ -830,6 +807,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
shift = IXGBE_INCVAL_SHIFT_1GB;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
+ default:
incval = IXGBE_INCVAL_10GB;
shift = IXGBE_INCVAL_SHIFT_10GB;
break;
@@ -857,18 +835,11 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
return;
}
- /* reset the system time registers */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
- IXGBE_WRITE_FLUSH(hw);
-
- /* store the new cycle speed */
- adapter->cycle_speed = cycle_speed;
-
+ /* update the base incval used to calculate frequency adjustment */
ACCESS_ONCE(adapter->base_incval) = incval;
smp_mb();
- /* grab the ptp lock */
+ /* need lock to prevent incorrect read while modifying cyclecounter */
spin_lock_irqsave(&adapter->tmreg_lock, flags);
memset(&adapter->cc, 0, sizeof(adapter->cc));
@@ -877,6 +848,31 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
adapter->cc.shift = shift;
adapter->cc.mult = 1;
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+}
+
+/**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+ *
+ * When the MAC resets, all timesync features are reset. This function should be
+ * called to re-enable the PTP clock structure. It will re-init the timecounter
+ * structure based on the kernel time as well as setup the cycle counter data.
+ */
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ /* set SYSTIME registers to 0 just in case */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ptp_start_cyclecounter(adapter);
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
/* reset the ns time counter */
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
@@ -904,7 +900,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -918,7 +914,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.enable = ixgbe_ptp_enable;
break;
case ixgbe_mac_82599EB:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -942,11 +938,6 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
spin_lock_init(&adapter->tmreg_lock);
- ixgbe_ptp_start_cyclecounter(adapter);
-
- /* (Re)start the overflow check */
- adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -955,6 +946,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
} else
e_dev_info("registered PHC device on %s\n", netdev->name);
+ ixgbe_ptp_reset(adapter);
+
+ /* set the flag that PTP has been enabled */
+ adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+
return;
}
@@ -967,7 +963,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
/* stop the overflow check task */
- adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED |
+ adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
IXGBE_FLAG2_PTP_PPS_ENABLED);
ixgbe_ptp_setup_sdp(adapter);
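Editor's note: with ixgbe_ptp_reset() split out, a MAC reset only has to zero SYSTIME, reprogram TIMINCA via ixgbe_ptp_start_cyclecounter(), and re-seed the timecounter from kernel time, all behind IXGBE_FLAG2_PTP_ENABLED. The cyclecounter/timecounter pairing it relies on converts raw SYSTIME ticks into nanoseconds; a minimal read-side sketch under the driver's tmreg_lock is shown below (example_ns_now is an illustrative name, roughly what the driver's gettime path does).

/* Hedged sketch: read current PTP time via the adapter's timecounter. */
static u64 example_ns_now(struct ixgbe_adapter *adapter)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_read(&adapter->tc);	/* cyclecounter -> ns */
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	return ns;
}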
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dce48bf..4993642 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -117,6 +117,9 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
}
}
+ /* Initialize default switching mode VEB */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
/* If call to enable VFs succeeded then allocate memory
* for per VF control structures.
*/
@@ -150,16 +153,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
-#ifdef IXGBE_FCOE
- /*
- * When SR-IOV is enabled 82599 cannot support jumbo frames
- * so we must disable FCoE because we cannot support FCoE MTU.
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
- IXGBE_FLAG_FCOE_CAPABLE);
-#endif
-
/* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
@@ -265,8 +258,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
}
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
- int entries, u16 *hash_list, u32 vf)
+ u32 *msgbuf, u32 vf)
{
+ int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
int i;
@@ -353,31 +349,89 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int new_mtu = msgbuf[1];
+ int max_frame = msgbuf[1];
u32 max_frs;
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- /* Only X540 supports jumbo frames in IOV mode */
- if (adapter->hw.mac.type != ixgbe_mac_X540)
- return;
+ /*
+ * For 82599EB we have to keep all PFs and VFs operating with
+ * the same max_frame value in order to avoid sending an oversize
+ * frame to a VF. In order to guarantee this is handled correctly
+ * for all cases we have several special exceptions to take into
+ * account before we can enable the VF for receive
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+ u32 reg_offset, vf_shift, vfre;
+ s32 err = 0;
+
+#ifdef CONFIG_FCOE
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_11:
+ /*
+ * Version 1.1 supports jumbo frames on VFs if PF has
+ * jumbo frames enabled which means legacy VFs are
+ * disabled
+ */
+ if (pf_max_frame > ETH_FRAME_LEN)
+ break;
+ default:
+ /*
+ * If the PF or VF are running w/ jumbo frames enabled
+ * we need to shut down the VF Rx path as we cannot
+ * support jumbo frames on legacy VFs
+ */
+ if ((pf_max_frame > ETH_FRAME_LEN) ||
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+ err = -EINVAL;
+ break;
+ }
+
+ /* determine VF receive enable location */
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable or disable receive depending on error */
+ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ if (err)
+ vfre &= ~(1 << vf_shift);
+ else
+ vfre |= 1 << vf_shift;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
+
+ if (err) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return err;
+ }
+ }
/* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
- e_err(drv, "VF mtu %d out of range\n", new_mtu);
- return;
+ if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
}
- max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
- IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
- if (max_frs < new_mtu) {
- max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ /* pull current max frame size from hardware */
+ max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ max_frs &= IXGBE_MHADD_MFS_MASK;
+ max_frs >>= IXGBE_MHADD_MFS_SHIFT;
+
+ if (max_frs < max_frame) {
+ max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
}
- e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+ e_info(hw, "VF requests change max MTU to %d\n", max_frame);
+
+ return 0;
}
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
@@ -392,35 +446,47 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+ u16 vid, u16 qos, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
- if (vid)
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
- (vid | IXGBE_VMVIR_VLANA_DEFAULT));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
}
+static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+ /* add PF assigned VLAN or VLAN 0 */
+ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
/* reset offloads to defaults */
- if (adapter->vfinfo[vf].pf_vlan) {
- ixgbe_set_vf_vlan(adapter, true,
- adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_set_vmvir(adapter,
- (adapter->vfinfo[vf].pf_vlan |
- (adapter->vfinfo[vf].pf_qos <<
- VLAN_PRIO_SHIFT)), vf);
- ixgbe_set_vmolr(hw, vf, false);
+ ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ ixgbe_clear_vmvir(adapter, vf);
} else {
- ixgbe_set_vf_vlan(adapter, true, 0, vf);
- ixgbe_set_vmvir(adapter, 0, vf);
- ixgbe_set_vmolr(hw, vf, true);
+ if (vfinfo->pf_qos || !num_tcs)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ adapter->default_up, vf);
+
+ if (vfinfo->spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
}
/* reset multicast table array for vf */
@@ -430,6 +496,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_set_rx_mode(adapter->netdev);
hw->mac.ops.clear_rar(hw, rar_entry);
+
+ /* reset VF api back to unknown */
+ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -521,30 +590,221 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
return 0;
}
-static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 reg;
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ u32 reg, msgbuf[4];
u32 reg_offset, vf_shift;
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ e_info(probe, "VF Reset msg received from vf %d\n", vf);
+
+ /* reset the filters for the device */
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* set vf mac address */
+ ixgbe_set_vf_mac(adapter, vf, vf_mac);
vf_shift = vf % 32;
reg_offset = vf / 32;
- /* enable transmit and receive for vf */
+ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= (reg | (1 << vf_shift));
+ reg |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= (reg | (1 << vf_shift));
+ reg |= 1 << vf_shift;
+ /*
+ * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+ * For more info take a look at ixgbe_set_vf_lpe
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#ifdef CONFIG_FCOE
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg &= ~(1 << vf_shift);
+ }
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+ /* enable VF mailbox for further messages */
+ adapter->vfinfo[vf].clear_to_send = true;
+
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
- ixgbe_vf_reset_event(adapter, vf);
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return 0;
+}
+
+static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+ if (!is_valid_ether_addr(new_mac)) {
+ e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+ return -1;
+ }
+
+ if (adapter->vfinfo[vf].pf_set_mac &&
+ memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
+ ETH_ALEN)) {
+ e_warn(drv,
+ "VF %d attempted to override administratively set MAC address\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ return -1;
+ }
+
+ return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
+}
+
+static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ int err;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (adapter->vfinfo[vf].pf_vlan || tcs) {
+ e_warn(drv,
+ "VF %d attempted to override administratively set VLAN configuration\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ return -1;
+ }
+
+ if (add)
+ adapter->vfinfo[vf].vlan_count++;
+ else if (adapter->vfinfo[vf].vlan_count)
+ adapter->vfinfo[vf].vlan_count--;
+
+ err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ if (!err && adapter->vfinfo[vf].spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+
+ return err;
+}
+
+static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+ IXGBE_VT_MSGINFO_SHIFT;
+ int err;
+
+ if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+ e_warn(drv,
+ "VF %d requested MACVLAN filter but is administratively denied\n",
+ vf);
+ return -1;
+ }
+
+ /* A non-zero index indicates the VF is setting a filter */
+ if (index) {
+ if (!is_valid_ether_addr(new_mac)) {
+ e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+ return -1;
+ }
+
+ /*
+ * If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives.
+ */
+ if (adapter->vfinfo[vf].spoofchk_enabled)
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+ }
+
+ err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+ if (err == -ENOSPC)
+ e_warn(drv,
+ "VF %d has requested a MACVLAN filter but there is no space for it\n",
+ vf);
+
+ return err < 0;
+}
+
+static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ int api = msgbuf[1];
+
+ switch (api) {
+ case ixgbe_mbox_api_10:
+ case ixgbe_mbox_api_11:
+ adapter->vfinfo[vf].vf_api = api;
+ return 0;
+ default:
+ break;
+ }
+
+ e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+
+ return -1;
+}
+
+static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ unsigned int default_tc = 0;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ /* verify the PF is supporting the correct APIs */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_20:
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return -1;
+ }
+
+ /* only allow 1 Tx queue for bandwidth limiting */
+ msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+ msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+ /* if TCs > 1 determine which TC belongs to default user priority */
+ if (num_tcs > 1)
+ default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
+
+ /* notify VF of need for VLAN tag stripping, and correct queue */
+ if (num_tcs)
+ msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+ else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
+ msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
+ else
+ msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+ /* notify VF of default queue */
+ msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
+
+ return 0;
}
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +813,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
s32 retval;
- int entries;
- u16 *hash_list;
- int add, vid, index;
- u8 *new_mac;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -572,39 +828,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* flush the ack before we write any messages back */
IXGBE_WRITE_FLUSH(hw);
+ if (msgbuf[0] == IXGBE_VF_RESET)
+ return ixgbe_vf_reset_msg(adapter, vf);
+
/*
* until the vf completes a virtual function reset it should not be
* allowed to start any configuration.
*/
-
- if (msgbuf[0] == IXGBE_VF_RESET) {
- unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
- new_mac = (u8 *)(&msgbuf[1]);
- e_info(probe, "VF Reset msg received from vf %d\n", vf);
- adapter->vfinfo[vf].clear_to_send = false;
- ixgbe_vf_reset_msg(adapter, vf);
- adapter->vfinfo[vf].clear_to_send = true;
-
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac)
- ixgbe_set_vf_mac(adapter, vf, vf_mac);
- else
- ixgbe_set_vf_mac(adapter,
- vf, adapter->vfinfo[vf].vf_mac_addresses);
-
- /* reply to reset with ack and vf mac address */
- msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(new_mac, vf_mac, ETH_ALEN);
- /*
- * Piggyback the multicast filter type so VF can compute the
- * correct vectors
- */
- msgbuf[3] = hw->mac.mc_filter_type;
- ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
-
- return retval;
- }
-
if (!adapter->vfinfo[vf].clear_to_send) {
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -613,70 +843,25 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
switch ((msgbuf[0] & 0xFFFF)) {
case IXGBE_VF_SET_MAC_ADDR:
- new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac) {
- ixgbe_set_vf_mac(adapter, vf, new_mac);
- } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
- new_mac, ETH_ALEN)) {
- e_warn(drv, "VF %d attempted to override "
- "administratively set MAC address\nReload "
- "the VF driver to resume operations\n", vf);
- retval = -1;
- }
+ retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_MULTICAST:
- entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
- hash_list = (u16 *)&msgbuf[1];
- retval = ixgbe_set_vf_multicasts(adapter, entries,
- hash_list, vf);
- break;
- case IXGBE_VF_SET_LPE:
- ixgbe_set_vf_lpe(adapter, msgbuf);
+ retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_VLAN:
- add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
- vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- if (adapter->vfinfo[vf].pf_vlan) {
- e_warn(drv, "VF %d attempted to override "
- "administratively set VLAN configuration\n"
- "Reload the VF driver to resume operations\n",
- vf);
- retval = -1;
- } else {
- if (add)
- adapter->vfinfo[vf].vlan_count++;
- else if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
- retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
- if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- }
+ retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_MACVLAN:
- index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
- IXGBE_VT_MSGINFO_SHIFT;
- if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
- e_warn(drv, "VF %d requested MACVLAN filter but is "
- "administratively denied\n", vf);
- retval = -1;
- break;
- }
- /*
- * If the VF is allowed to set MAC filters then turn off
- * anti-spoofing to avoid false positives. An index
- * greater than 0 will indicate the VF is setting a
- * macvlan MAC filter.
- */
- if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
- retval = ixgbe_set_vf_macvlan(adapter, vf, index,
- (unsigned char *)(&msgbuf[1]));
- if (retval == -ENOSPC)
- e_warn(drv, "VF %d has requested a MACVLAN filter "
- "but there is no space for it\n", vf);
+ retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_API_NEGOTIATE:
+ retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_GET_QUEUES:
+ retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -692,7 +877,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
- ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
return retval;
}
@@ -783,7 +968,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;
- ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmvir(adapter, vlan, qos, vf);
ixgbe_set_vmolr(hw, vf, false);
if (adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
@@ -803,7 +988,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
} else {
err = ixgbe_set_vf_vlan(adapter, false,
adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_clear_vmvir(adapter, vf);
ixgbe_set_vmolr(hw, vf, true);
hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
if (adapter->vfinfo[vf].vlan_count)
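One pattern worth calling out in the sriov changes above is how a single VF's enable bit is located inside the 32-bit VFRE/VFTE register bank: vf % 32 selects the bit, vf / 32 selects the word. Below is a standalone illustrative sketch of that indexing; it is not part of the patch and the names are made up for clarity.

#include <stdint.h>
#include <stdbool.h>

/* Each 32-bit word of the VFRE/VFTE bank covers 32 VFs, so VF n
 * lives at bit (n % 32) of word (n / 32). */
static uint32_t vf_bit_update(uint32_t word, unsigned int vf, bool enable)
{
	uint32_t mask = 1u << (vf % 32);

	return enable ? (word | mask) : (word & ~mask);
}

static unsigned int vf_word_index(unsigned int vf)
{
	return vf / 32;	/* e.g. VF 37 -> word 1, bit 5 */
}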
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0722f33..21915e2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -56,6 +56,7 @@
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index da17ccf..3147795 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,8 +33,11 @@
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_VF_IRQ_CLEAR_MASK 7
-#define IXGBE_VF_MAX_TX_QUEUES 1
-#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
/* Link speed */
typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 4a9c9c2..fc0af9a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,7 +58,6 @@ struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct net_device *netdev;
struct device *dev;
- struct ixgbevf_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
@@ -75,6 +74,8 @@ struct ixgbevf_ring {
u64 total_bytes;
u64 total_packets;
struct u64_stats_sync syncp;
+ u64 hw_csum_rx_error;
+ u64 hw_csum_rx_good;
u16 head;
u16 tail;
@@ -89,8 +90,8 @@ struct ixgbevf_ring {
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define MAX_RX_QUEUES 1
-#define MAX_TX_QUEUES 1
+#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
+#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
@@ -101,10 +102,10 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_3K 3072
-#define IXGBEVF_RXBUFFER_7K 7168
-#define IXGBEVF_RXBUFFER_15K 15360
-#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define IXGBEVF_RXBUFFER_2K 2048
+#define IXGBEVF_RXBUFFER_4K 4096
+#define IXGBEVF_RXBUFFER_8K 8192
+#define IXGBEVF_RXBUFFER_10K 10240
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
@@ -229,6 +230,7 @@ struct ixgbevf_adapter {
*/
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
/* OS defined structs */
struct net_device *netdev;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de1ad50..f267c00 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
struct ixgbevf_ring *rx_ring,
@@ -120,7 +121,6 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
* @queue: queue to map the corresponding interrupt to
* @msix_vector: the vector to map to the corresponding queue
- *
*/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
u8 queue, u8 msix_vector)
@@ -287,17 +287,19 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
- napi_gro_receive(&q_vector->napi, skb);
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+ napi_gro_receive(&q_vector->napi, skb);
+ else
+ netif_rx(skb);
}
/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @adapter: address of board private structure
+ * @ring: pointer to Rx descriptor ring structure
* @status_err: hardware indication of status of receive
* @skb: skb currently being received and modified
**/
-static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *ring,
+static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -309,7 +311,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
/* if IP and error */
if ((status_err & IXGBE_RXD_STAT_IPCS) &&
(status_err & IXGBE_RXDADV_ERR_IPE)) {
- adapter->hw_csum_rx_error++;
+ ring->hw_csum_rx_error++;
return;
}
@@ -317,13 +319,13 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
return;
if (status_err & IXGBE_RXDADV_ERR_TCPE) {
- adapter->hw_csum_rx_error++;
+ ring->hw_csum_rx_error++;
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_rx_good++;
+ ring->hw_csum_rx_good++;
}
/**
@@ -337,15 +339,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
- struct sk_buff *skb;
unsigned int i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
- skb = bi->skb;
- if (!skb) {
+
+ if (!bi->skb) {
+ struct sk_buff *skb;
+
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
@@ -353,11 +356,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
goto no_buffers;
}
bi->skb = skb;
- }
- if (!bi->dma) {
+
bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ dev_kfree_skb(skb);
+ bi->skb = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ break;
+ }
}
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
@@ -370,7 +378,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
-
ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
}
}
@@ -454,7 +461,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
+ ixgbevf_rx_checksum(rx_ring, staterr, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -471,6 +478,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* Workaround hardware that can't do proper VEPA multicast
+ * source pruning.
+ */
+ if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+ !(compare_ether_addr(adapter->netdev->dev_addr,
+ eth_hdr(skb)->h_source))) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
next_desc:
@@ -533,9 +550,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
+ adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget);
+ adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
@@ -743,7 +762,6 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
return IRQ_HANDLED;
}
-
/**
* ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
* @irq: unused
@@ -1065,20 +1083,20 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
max_frame += VLAN_HLEN;
/*
- * Make best use of allocation by using all but 1K of a
- * power of 2 allocation that will be used for skb->head.
+ * Allocate buffer sizes that fit well into 32K and
+ * take into account max frame size of 9.5K
*/
if ((hw->mac.type == ixgbe_mac_X540_vf) &&
(max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else if (max_frame <= IXGBEVF_RXBUFFER_3K)
- rx_buf_len = IXGBEVF_RXBUFFER_3K;
- else if (max_frame <= IXGBEVF_RXBUFFER_7K)
- rx_buf_len = IXGBEVF_RXBUFFER_7K;
- else if (max_frame <= IXGBEVF_RXBUFFER_15K)
- rx_buf_len = IXGBEVF_RXBUFFER_15K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_2K)
+ rx_buf_len = IXGBEVF_RXBUFFER_2K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_4K)
+ rx_buf_len = IXGBEVF_RXBUFFER_4K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_8K)
+ rx_buf_len = IXGBEVF_RXBUFFER_8K;
else
- rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+ rx_buf_len = IXGBEVF_RXBUFFER_10K;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i].rx_buf_len = rx_buf_len;
@@ -1128,15 +1146,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
struct ixgbe_hw *hw = &adapter->hw;
int err;
- if (!hw->mac.ops.set_vfta)
- return -EOPNOTSUPP;
-
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* add VID to filter table */
err = hw->mac.ops.set_vfta(hw, vid, 0, true);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
/* translate error return types so error makes sense */
if (err == IXGBE_ERR_MBX)
@@ -1156,13 +1171,12 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbe_hw *hw = &adapter->hw;
int err = -EOPNOTSUPP;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* remove VID from filter table */
- if (hw->mac.ops.set_vfta)
- err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+ err = hw->mac.ops.set_vfta(hw, vid, 0, false);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
clear_bit(vid, adapter->active_vlans);
@@ -1206,27 +1220,27 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
}
/**
- * ixgbevf_set_rx_mode - Multicast set
+ * ixgbevf_set_rx_mode - Multicast and unicast set
* @netdev: network interface device structure
*
* The set_rx_method entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast mode.
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
**/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* reprogram multicast list */
- if (hw->mac.ops.update_mc_addr_list)
- hw->mac.ops.update_mc_addr_list(hw, netdev);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1290,8 +1304,8 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
"not set within the polling period\n", rxr);
}
- ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
+ adapter->rx_ring[rxr].count - 1);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1335,11 +1349,12 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_10,
+ int api[] = { ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown };
int err = 0, idx = 0;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
while (api[idx] != ixgbe_mbox_api_unknown) {
err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1348,7 +1363,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1389,16 +1404,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_configure_msix(adapter);
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- if (hw->mac.ops.set_rar) {
- if (is_valid_ether_addr(hw->mac.addr))
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
- else
- hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
- }
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1413,12 +1426,87 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
mod_timer(&adapter->watchdog_timer, jiffies);
}
+static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err, i;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return err;
+
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0].reg_idx = def_q;
+
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* nothing to do if we have the correct number of queues */
+ if (adapter->num_rx_queues == num_rx_queues)
+ return 0;
+
+ /* allocate new rings */
+ rx_ring = kcalloc(num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return -ENOMEM;
+
+ /* setup ring fields */
+ for (i = 0; i < num_rx_queues; i++) {
+ rx_ring[i].count = adapter->rx_ring_count;
+ rx_ring[i].queue_index = i;
+ rx_ring[i].reg_idx = i;
+ rx_ring[i].dev = &adapter->pdev->dev;
+ rx_ring[i].netdev = adapter->netdev;
+
+ /* allocate resources on the ring */
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
+ }
+ kfree(rx_ring);
+ return err;
+ }
+ }
+
+ /* free the existing rings and queues */
+ ixgbevf_free_all_rx_resources(adapter);
+ adapter->num_rx_queues = 0;
+ kfree(adapter->rx_ring);
+
+ /* move new rings into position on the adapter struct */
+ adapter->rx_ring = rx_ring;
+ adapter->num_rx_queues = num_rx_queues;
+
+ /* reset ring to vector mapping */
+ ixgbevf_reset_q_vectors(adapter);
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ return 0;
+}
+
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
ixgbevf_negotiate_api(adapter);
+ ixgbevf_reset_queues(adapter);
+
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1497,7 +1585,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
return;
/* Free all the Tx ring sk_buffs */
-
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
@@ -1593,13 +1680,6 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
- /*
- * Check if PF is up before re-init. If not then skip until
- * later when the PF is up and ready to service requests from
- * the VF via mailbox. If the VF is up and running then the
- * watchdog task will continue to schedule reset tasks until
- * the PF is up and running.
- */
ixgbevf_down(adapter);
ixgbevf_up(adapter);
@@ -1611,15 +1691,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- spin_lock(&adapter->mbx_lock);
-
if (hw->mac.ops.reset_hw(hw))
hw_dbg(hw, "PF still resetting\n");
else
hw->mac.ops.init_hw(hw);
- spin_unlock(&adapter->mbx_lock);
-
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
@@ -1628,10 +1704,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
- int vectors)
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
{
- int err, vector_threshold;
+ int err = 0;
+ int vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
@@ -1647,21 +1724,18 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
vectors);
- if (!err) /* Success in acquiring all requested vectors. */
+ if (!err || err < 0) /* Success or a nasty failure. */
break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
else /* err == number of vectors we should try again with */
vectors = err;
}
- if (vectors < vector_threshold) {
- /* Can't allocate enough MSI-X interrupts? Oh well.
- * This just means we'll go with either a single MSI
- * vector or fall back to legacy interrupts.
- */
- hw_dbg(&adapter->hw,
- "Unable to allocate MSI-X interrupts\n");
+ if (vectors < vector_threshold)
+ err = -ENOMEM;
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else {
@@ -1672,6 +1746,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
*/
adapter->num_msix_vectors = vectors;
}
+
+ return err;
}
/**
@@ -1717,6 +1793,7 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
adapter->tx_ring[i].count = adapter->tx_ring_count;
adapter->tx_ring[i].queue_index = i;
+ /* reg_idx may be remapped later by DCB config */
adapter->tx_ring[i].reg_idx = i;
adapter->tx_ring[i].dev = &adapter->pdev->dev;
adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1774,7 +1851,9 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
- ixgbevf_acquire_msix_vectors(adapter, v_budget);
+ err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
+ if (err)
+ goto out;
err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
@@ -1834,18 +1913,13 @@ err_out:
**/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
- int q_idx, num_q_vectors;
- int napi_vectors;
-
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
+ int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
adapter->q_vector[q_idx] = NULL;
- if (q_idx < napi_vectors)
- netif_napi_del(&q_vector->napi);
+ netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
}
@@ -1950,8 +2024,11 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->subsystem_device_id = pdev->subsystem_device;
hw->mbx.ops.init_params(hw);
- hw->mac.max_tx_queues = MAX_TX_QUEUES;
- hw->mac.max_rx_queues = MAX_RX_QUEUES;
+
+ /* assume legacy case in which PF would only give VF 2 queues */
+ hw->mac.max_tx_queues = 2;
+ hw->mac.max_rx_queues = 2;
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
@@ -1966,7 +2043,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
goto out;
}
memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
- adapter->netdev->addr_len);
+ adapter->netdev->addr_len);
}
/* lock to protect mailbox accesses */
@@ -2016,6 +2093,7 @@ out:
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
adapter->stats.vfgprc);
@@ -2029,6 +2107,15 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfgotc);
UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
adapter->stats.vfmprc);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->hw_csum_rx_error +=
+ adapter->rx_ring[i].hw_csum_rx_error;
+ adapter->hw_csum_rx_good +=
+ adapter->rx_ring[i].hw_csum_rx_good;
+ adapter->rx_ring[i].hw_csum_rx_error = 0;
+ adapter->rx_ring[i].hw_csum_rx_good = 0;
+ }
}
/**
@@ -2103,6 +2190,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = adapter->link_speed;
bool link_up = adapter->link_up;
+ s32 need_reset;
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2110,29 +2198,19 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
* Always check the link on the watchdog because we have
* no LSC interrupt
*/
- if (hw->mac.ops.check_link) {
- s32 need_reset;
-
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- need_reset = hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false);
+ need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
- if (need_reset) {
- adapter->link_up = link_up;
- adapter->link_speed = link_speed;
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- schedule_work(&adapter->reset_task);
- goto pf_has_reset;
- }
- } else {
- /* always assume link is up, if no check link
- * function */
- link_speed = IXGBE_LINK_SPEED_10GB_FULL;
- link_up = true;
+ if (need_reset) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
}
adapter->link_up = link_up;
adapter->link_speed = link_speed;
@@ -2377,6 +2455,63 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
&adapter->rx_ring[i]);
}
+static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err, i;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return err;
+
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0].reg_idx = def_q;
+
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* nothing to do if we have the correct number of queues */
+ if (adapter->num_rx_queues == num_rx_queues)
+ return 0;
+
+ /* allocate new rings */
+ rx_ring = kcalloc(num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return -ENOMEM;
+
+ /* setup ring fields */
+ for (i = 0; i < num_rx_queues; i++) {
+ rx_ring[i].count = adapter->rx_ring_count;
+ rx_ring[i].queue_index = i;
+ rx_ring[i].reg_idx = i;
+ rx_ring[i].dev = &adapter->pdev->dev;
+ rx_ring[i].netdev = adapter->netdev;
+ }
+
+ /* free the existing ring and queues */
+ adapter->num_rx_queues = 0;
+ kfree(adapter->rx_ring);
+
+ /* move new rings into position on the adapter struct */
+ adapter->rx_ring = rx_ring;
+ adapter->num_rx_queues = num_rx_queues;
+
+ return 0;
+}
+
/**
* ixgbevf_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -2413,6 +2548,11 @@ static int ixgbevf_open(struct net_device *netdev)
ixgbevf_negotiate_api(adapter);
+ /* setup queue reg_idx and Rx queue count */
+ err = ixgbevf_setup_queues(adapter);
+ if (err)
+ goto err_setup_queues;
+
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2451,6 +2591,7 @@ err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
ixgbevf_free_all_tx_resources(adapter);
+err_setup_queues:
ixgbevf_reset(adapter);
err_setup_reset:
@@ -2562,9 +2703,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
-
-
-
u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
@@ -2678,10 +2816,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_buffer_info->dma =
skb_frag_dma_map(tx_ring->dev, frag,
offset, size, DMA_TO_DEVICE);
- tx_buffer_info->mapped_as_page = true;
if (dma_mapping_error(tx_ring->dev,
tx_buffer_info->dma))
goto dma_error;
+ tx_buffer_info->mapped_as_page = true;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2754,7 +2892,6 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-
}
/*
@@ -2823,6 +2960,11 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
+ u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+ if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
tx_ring = &adapter->tx_ring[r_idx];
@@ -2902,12 +3044,11 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- if (hw->mac.ops.set_rar)
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
return 0;
}
@@ -2925,8 +3066,15 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
- if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_11:
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+ break;
+ default:
+ if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+ break;
+ }
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > max_possible_frame))
@@ -3223,10 +3371,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
- /* pick up the PCI bus settings for reporting later */
- if (hw->mac.ops.get_bus_info)
- hw->mac.ops.get_bus_info(hw);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
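The rx_buf_len rework above replaces the old 3K/7K/15K buckets with sizes that pack cleanly into 32 KB allocations and top out at the 9.5 KB jumbo maximum. A minimal sketch of the same bucketing, using the tier values added by the patch but otherwise illustrative (the X540 1522-byte special case is omitted):

/* pick the smallest Rx buffer tier that holds max_frame */
static int pick_rx_buf_len(int max_frame)
{
	static const int tiers[] = { 2048, 4096, 8192, 10240 };
	int i;

	for (i = 0; i < 3; i++)
		if (max_frame <= tiers[i])
			return tiers[i];

	return tiers[3];	/* 10K covers the 9.5K jumbo maximum */
}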
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 946ce86..0bc3005 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -85,6 +85,7 @@
enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -100,6 +101,15 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
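The new GET_QUEUES indices describe a five-word mailbox exchange: word 0 carries the message code plus ACK/NAK flags, words 1-4 carry the queue data. A hedged sketch of unpacking such a reply; the struct and function names here are illustrative only, not kernel API:

#include <stdint.h>

struct vf_queue_reply {			/* illustrative container */
	uint32_t num_tx_queues;		/* msg[IXGBE_VF_TX_QUEUES]  == msg[1] */
	uint32_t num_rx_queues;		/* msg[IXGBE_VF_RX_QUEUES]  == msg[2] */
	uint32_t port_vlan;		/* msg[IXGBE_VF_TRANS_VLAN] == msg[3] */
	uint32_t default_queue;		/* msg[IXGBE_VF_DEF_QUEUE]  == msg[4] */
};

static void unpack_get_queues_reply(const uint32_t msg[5],
				    struct vf_queue_reply *r)
{
	r->num_tx_queues = msg[1];
	r->num_rx_queues = msg[2];
	r->port_vlan     = msg[3];
	r->default_queue = msg[4];
}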
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c7447e..0c94557 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -331,6 +331,9 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
netdev_for_each_mc_addr(ha, netdev) {
if (i == cnt)
break;
+ if (is_link_local_ether_addr(ha->addr))
+ continue;
+
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
@@ -513,6 +516,64 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
return err;
}
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ int err;
+ u32 msg[5];
+
+ /* do nothing if API doesn't support ixgbevf_get_queues */
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return 0;
+ }
+
+ /* Fetch queue configuration from the PF */
+ msg[0] = IXGBE_VF_GET_QUEUE;
+ msg[1] = msg[2] = msg[3] = msg[4] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 5);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 5);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /*
+ * if we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_MBX;
+
+ /* record and validate values from message */
+ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+ if (hw->mac.max_tx_queues == 0 ||
+ hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+ hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+ if (hw->mac.max_rx_queues == 0 ||
+ hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+ hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > hw->mac.max_rx_queues)
+ *num_tcs = 1;
+
+ *default_tc = msg[IXGBE_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= hw->mac.max_tx_queues)
+ *default_tc = 0;
+ }
+
+ return err;
+}
+
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 47f11a5..7b1f502 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -174,5 +174,7 @@ struct ixgbevf_info {
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc);
#endif /* __IXGBE_VF_H__ */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 5948972..10d678d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1131,7 +1131,7 @@ static int pxa168_eth_open(struct net_device *dev)
err = request_irq(dev->irq, pxa168_eth_int_handler,
IRQF_DISABLED, dev->name, dev);
if (err) {
- dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ dev_err(&dev->dev, "can't assign irq\n");
return -EAGAIN;
}
pep->rx_resource_err = 0;
@@ -1201,9 +1201,8 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
*/
pxa168_eth_stop(dev);
if (pxa168_eth_open(dev)) {
- dev_printk(KERN_ERR, &dev->dev,
- "fatal error on re-opening device after "
- "MTU change\n");
+ dev_err(&dev->dev,
+ "fatal error on re-opening device after MTU change\n");
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index edd9cb8..2b23ca2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -870,7 +870,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
/* If we haven't received a specific coalescing setting
* (module param), we set the moderation parameters as follows:
* - moder_cnt is set to the number of mtu sized packets to
- * satisfy our coelsing target.
+ * satisfy our coalescing target.
* - moder_time is set to a fixed value.
*/
priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9d27e42..8a5e70d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,7 +126,7 @@ enum {
#define MLX4_EN_RX_COAL_TIME 0x10
#define MLX4_EN_TX_COAL_PKTS 16
-#define MLX4_EN_TX_COAL_TIME 0x80
+#define MLX4_EN_TX_COAL_TIME 0x10
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 69e0197..d16ef24 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7243,18 +7243,7 @@ static struct pci_driver pci_device_driver = {
.remove = pcidev_exit
};
-static int __init ksz884x_init_module(void)
-{
- return pci_register_driver(&pci_device_driver);
-}
-
-static void __exit ksz884x_cleanup_module(void)
-{
- pci_unregister_driver(&pci_device_driver);
-}
-
-module_init(ksz884x_init_module);
-module_exit(ksz884x_cleanup_module);
+module_pci_driver(pci_device_driver);
MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
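For readers unfamiliar with module_pci_driver(): it generates the init/exit boilerplate that this hunk (and the qla3xxx one further down) deletes. Roughly, and with illustrative function names rather than the exact macro expansion:

static int __init ksz884x_pci_init(void)
{
	return pci_register_driver(&pci_device_driver);
}
module_init(ksz884x_pci_init);

static void __exit ksz884x_pci_exit(void)
{
	pci_unregister_driver(&pci_device_driver);
}
module_exit(ksz884x_pci_exit);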
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index de50547..c98decb 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8239,7 +8239,8 @@ static int __init s2io_starter(void)
/**
* s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
+ * Description: This function is the cleanup routine for the driver. It
+ * unregisters the driver.
*/
static __exit void s2io_closer(void)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 5296cc8..00bc4fc 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -20,19 +20,3 @@ config PCH_GBE
purpose use.
ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-
-if PCH_GBE
-
-config PCH_PTP
- bool "PCH PTP clock support"
- default n
- depends on EXPERIMENTAL
- select PPS
- select PTP_1588_CLOCK
- select PTP_1588_CLOCK_PCH
- ---help---
- Say Y here if you want to use Precision Time Protocol (PTP) in the
- driver. PTP is a method to precisely synchronize distributed clocks
- over Ethernet networks.
-
-endif # PCH_GBE
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index b07311e..7fb7e17 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -649,7 +649,6 @@ extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
struct pch_gbe_rx_ring *rx_ring);
extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-#ifdef CONFIG_PCH_PTP
extern u32 pch_ch_control_read(struct pci_dev *pdev);
extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
extern u32 pch_ch_event_read(struct pci_dev *pdev);
@@ -659,7 +658,6 @@ extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
extern u64 pch_rx_snap_read(struct pci_dev *pdev);
extern u64 pch_tx_snap_read(struct pci_dev *pdev);
extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
-#endif
/* pch_gbe_param.c */
extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 4c4fe5b..39ab4d0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -21,10 +21,8 @@
#include "pch_gbe.h"
#include "pch_gbe_api.h"
#include <linux/module.h>
-#ifdef CONFIG_PCH_PTP
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
-#endif
#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;
@@ -98,7 +96,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_INT_DISABLE_ALL 0
-#ifdef CONFIG_PCH_PTP
/* Macros for ieee1588 */
/* 0x40 Time Synchronization Channel Control Register Bits */
#define MASTER_MODE (1<<0)
@@ -113,7 +110,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
-#endif
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -122,7 +118,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
static void pch_gbe_set_multi(struct net_device *netdev);
-#ifdef CONFIG_PCH_PTP
static struct sock_filter ptp_filter[] = {
PTP_FILTER
};
@@ -291,7 +286,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
-#endif
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
@@ -1244,9 +1238,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
(int)sizeof(struct pch_gbe_tx_desc) * ring_num,
&hw->reg->TX_DSC_SW_P);
-#ifdef CONFIG_PCH_PTP
pch_tx_timestamp(adapter, skb);
-#endif
dev_kfree_skb_any(skb);
}
@@ -1730,9 +1722,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
/* Write meta date of skb */
skb_put(skb, length);
-#ifdef CONFIG_PCH_PTP
pch_rx_timestamp(adapter, skb);
-#endif
skb->protocol = eth_type_trans(skb, netdev);
if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
@@ -2334,10 +2324,8 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
pr_debug("cmd : 0x%04x\n", cmd);
-#ifdef CONFIG_PCH_PTP
if (cmd == SIOCSHWTSTAMP)
return hwtstamp_ioctl(netdev, ifr, cmd);
-#endif
return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
@@ -2623,14 +2611,12 @@ static int pch_gbe_probe(struct pci_dev *pdev,
goto err_free_netdev;
}
-#ifdef CONFIG_PCH_PTP
adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
PCI_DEVFN(12, 4));
if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
pr_err("Bad ptp filter\n");
return -EINVAL;
}
-#endif
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 10468e7..4ca2c19 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -218,7 +218,7 @@ skip:
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
} else {
- ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |=
(ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
@@ -381,7 +381,7 @@ static u32 netxen_nic_test_link(struct net_device *dev)
static int
netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
- u8 * bytes)
+ u8 *bytes)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int offset;
@@ -488,6 +488,8 @@ netxen_nic_get_pauseparam(struct net_device *dev,
__u32 val;
int port = adapter->physical_port;
+ pause->autoneg = 0;
+
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return;
@@ -496,19 +498,19 @@ netxen_nic_get_pauseparam(struct net_device *dev,
pause->rx_pause = netxen_gb_get_rx_flowctl(val);
val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
switch (port) {
- case 0:
- pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
- break;
- case 1:
- pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
- break;
- case 2:
- pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
- break;
- case 3:
- default:
- pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
- break;
+ case 0:
+ pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
+ break;
}
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
@@ -532,6 +534,11 @@ netxen_nic_set_pauseparam(struct net_device *dev,
struct netxen_adapter *adapter = netdev_priv(dev);
__u32 val;
int port = adapter->physical_port;
+
+ /* not supported */
+ if (pause->autoneg)
+ return -EINVAL;
+
/* read mode */
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
@@ -549,31 +556,31 @@ netxen_nic_set_pauseparam(struct net_device *dev,
/* set autoneg */
val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
switch (port) {
- case 0:
- if (pause->tx_pause)
- netxen_gb_unset_gb0_mask(val);
- else
- netxen_gb_set_gb0_mask(val);
- break;
- case 1:
- if (pause->tx_pause)
- netxen_gb_unset_gb1_mask(val);
- else
- netxen_gb_set_gb1_mask(val);
- break;
- case 2:
- if (pause->tx_pause)
- netxen_gb_unset_gb2_mask(val);
- else
- netxen_gb_set_gb2_mask(val);
- break;
- case 3:
- default:
- if (pause->tx_pause)
- netxen_gb_unset_gb3_mask(val);
- else
- netxen_gb_set_gb3_mask(val);
- break;
+ case 0:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb0_mask(val);
+ else
+ netxen_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb1_mask(val);
+ else
+ netxen_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb2_mask(val);
+ else
+ netxen_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb3_mask(val);
+ else
+ netxen_gb_set_gb3_mask(val);
+ break;
}
NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
@@ -636,7 +643,7 @@ static int netxen_get_sset_count(struct net_device *dev, int sset)
static void
netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
- u64 * data)
+ u64 *data)
{
memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
if ((data[0] = netxen_nic_reg_test(dev)))
@@ -647,7 +654,7 @@ netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
}
static void
-netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
int index;
@@ -668,7 +675,7 @@ netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
static void
netxen_nic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 * data)
+ struct ethtool_stats *stats, u64 *data)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int index;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 6407d0d..12d1f24 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1920,7 +1920,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
{
struct ql_tx_buf_cb *tx_cb;
int i;
- int retval = 0;
if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
netdev_warn(qdev->ndev,
@@ -1935,7 +1934,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
"Frame too short to be legal, frame not sent\n");
qdev->ndev->stats.tx_errors++;
- retval = -EIO;
goto frame_not_sent;
}
@@ -1944,7 +1942,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
mac_rsp->transaction_id);
qdev->ndev->stats.tx_errors++;
- retval = -EIO;
goto invalid_seg_count;
}
@@ -3958,15 +3955,4 @@ static struct pci_driver ql3xxx_driver = {
.remove = __devexit_p(ql3xxx_remove),
};
-static int __init ql3xxx_init_module(void)
-{
- return pci_register_driver(&ql3xxx_driver);
-}
-
-static void __exit ql3xxx_exit(void)
-{
- pci_unregister_driver(&ql3xxx_driver);
-}
-
-module_init(ql3xxx_init_module);
-module_exit(ql3xxx_exit);
+module_pci_driver(ql3xxx_driver);
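
For reference, and assuming the standard helper in <linux/pci.h>, module_pci_driver() generates roughly the same init/exit boilerplate that the hunk above removes by hand; a simplified sketch of the expansion:

/* Simplified sketch of what module_pci_driver(ql3xxx_driver) expands to
 * (the real macro goes through module_driver()); it registers the driver
 * on module load and unregisters it on unload, exactly like the removed
 * ql3xxx_init_module()/ql3xxx_exit() pair. */
static int __init ql3xxx_driver_init(void)
{
	return pci_register_driver(&ql3xxx_driver);
}
module_init(ql3xxx_driver_init);

static void __exit ql3xxx_driver_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}
module_exit(ql3xxx_driver_exit);
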
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 58185b6..10093f0 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -86,7 +86,7 @@ exit:
}
/* Read out the SERDES registers */
-static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
@@ -364,7 +364,7 @@ exit:
/* Read the 400 xgmac control/statistics registers
* skipping unused locations.
*/
-static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
unsigned int other_function)
{
int status = 0;
@@ -405,7 +405,7 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
return status;
}
-static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
{
int status = 0;
int i;
@@ -423,7 +423,7 @@ static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
return status;
}
-static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
{
int i;
@@ -434,7 +434,7 @@ static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
}
}
-static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
{
int i, status;
u32 value[3];
@@ -471,7 +471,7 @@ err:
return status;
}
-static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
{
int status;
u32 value, i;
@@ -496,7 +496,7 @@ err:
}
/* Read the MPI Processor shadow registers */
-static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
{
u32 i;
int status;
@@ -515,7 +515,7 @@ end:
}
/* Read the MPI Processor core registers */
-static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
u32 offset, u32 count)
{
int i, status = 0;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index e02f04d..9f2d416 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -175,8 +175,7 @@ struct net_local {
unsigned int tx_unit_busy:1;
unsigned char re_tx, /* Number of packet retransmissions. */
addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
- pac_cnt_in_tx_buf,
- chip_type;
+ pac_cnt_in_tx_buf;
};
/* This code, written by wwc@super.org, resets the adapter every
@@ -339,7 +338,6 @@ static int __init atp_probe1(long ioaddr)
write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
lp = netdev_priv(dev);
- lp->chip_type = RTL8002;
lp->addr_mode = CMR2h_Normal;
spin_lock_init(&lp->lock);
@@ -852,7 +850,7 @@ net_close(struct net_device *dev)
* Set or clear the multicast filter for this adapter.
*/
-static void set_rx_mode_8002(struct net_device *dev)
+static void set_rx_mode(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
@@ -864,58 +862,6 @@ static void set_rx_mode_8002(struct net_device *dev)
write_reg_high(ioaddr, CMR2, lp->addr_mode);
}
-static void set_rx_mode_8012(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
- int i;
-
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- new_mode = CMR2h_PROMISC;
- } else if ((netdev_mc_count(dev) > 1000) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- new_mode = CMR2h_Normal;
- } else {
- struct netdev_hw_addr *ha;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
- mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
- }
- new_mode = CMR2h_Normal;
- }
- lp->addr_mode = new_mode;
- write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
- for (i = 0; i < 8; i++)
- write_reg_byte(ioaddr, i, mc_filter[i]);
- if (net_debug > 2 || 1) {
- lp->addr_mode = 1;
- printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to",
- dev->name, lp->addr_mode);
- for (i = 0; i < 8; i++)
- printk(" %2.2x", mc_filter[i]);
- printk(".\n");
- }
-
- write_reg_high(ioaddr, CMR2, lp->addr_mode);
- write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
-}
-
-static void set_rx_mode(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-
- if (lp->chip_type == RTL8002)
- return set_rx_mode_8002(dev);
- else
- return set_rx_mode_8012(dev);
-}
-
-
static int __init atp_init_module(void) {
if (debug) /* Emit version even if no cards detected. */
printk(KERN_INFO "%s", version);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 0edc642..040b137 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -16,8 +16,6 @@ struct rx_header {
#define PAR_STATUS 1
#define PAR_CONTROL 2
-enum chip_type { RTL8002, RTL8012 };
-
#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
#define Ctrl_HNibRead 0
#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 927aa33..248f883 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -78,24 +78,18 @@ static const int multicast_filter_limit = 32;
#define MAX_READ_REQUEST_SHIFT 12
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
-#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
#define R8169_REGS_SIZE 256
#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
-#define RX_BUF_SIZE 1536 /* Rx Buffer size */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL8169_TX_TIMEOUT (6*HZ)
#define RTL8169_PHY_TIMEOUT (10*HZ)
-#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
-#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
-#define RTL_EEPROM_SIG_ADDR 0x0000
-
/* write/read MMIO register */
#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
@@ -456,6 +450,7 @@ enum rtl8168_registers {
#define PWM_EN (1 << 22)
#define RXDV_GATED_EN (1 << 19)
#define EARLY_TALLY_EN (1 << 16)
+#define FORCE_CLK (1 << 15) /* force clock request */
};
enum rtl_register_content {
@@ -519,6 +514,7 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ ClkReqEn = (1 << 7), /* Clock Request Enable */
MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -539,6 +535,7 @@ enum rtl_register_content {
Spi_en = (1 << 3),
LanWake = (1 << 1), /* LanWake enable/disable */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+ ASPM_en = (1 << 0), /* ASPM enable */
/* TBICSR p.28 */
TBIReset = 0x80000000,
@@ -687,6 +684,7 @@ enum features {
RTL_FEATURE_WOL = (1 << 0),
RTL_FEATURE_MSI = (1 << 1),
RTL_FEATURE_GMII = (1 << 2),
+ RTL_FEATURE_FW_LOADED = (1 << 3),
};
struct rtl8169_counters {
@@ -1816,8 +1814,7 @@ static int rtl8169_set_features(struct net_device *dev,
}
-static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
- struct sk_buff *skb)
+static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
{
return (vlan_tx_tag_present(skb)) ?
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
@@ -2394,8 +2391,10 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
struct rtl_fw *rtl_fw = tp->rtl_fw;
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- if (!IS_ERR_OR_NULL(rtl_fw))
+ if (!IS_ERR_OR_NULL(rtl_fw)) {
rtl_phy_write_fw(tp, rtl_fw);
+ tp->features |= RTL_FEATURE_FW_LOADED;
+ }
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2406,6 +2405,31 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
rtl_apply_firmware(tp);
}
+static void r810x_aldps_disable(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+}
+
+static void r810x_aldps_enable(struct rtl8169_private *tp)
+{
+ if (!(tp->features & RTL_FEATURE_FW_LOADED))
+ return;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x8310);
+}
+
+static void r8168_aldps_enable_1(struct rtl8169_private *tp)
+{
+ if (!(tp->features & RTL_FEATURE_FW_LOADED))
+ return;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
+}
+
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3178,6 +3202,8 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168_aldps_enable_1(tp);
}
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
@@ -3250,6 +3276,8 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168_aldps_enable_1(tp);
}
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3257,6 +3285,8 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
rtl8168f_hw_phy_config(tp);
+
+ r8168_aldps_enable_1(tp);
}
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3354,6 +3384,8 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168_aldps_enable_1(tp);
}
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3439,21 +3471,19 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(100);
+ r810x_aldps_disable(tp);
rtl_apply_firmware(tp);
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ r810x_aldps_enable(tp);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
/* Disable ALDPS before setting firmware */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(20);
+ r810x_aldps_disable(tp);
rtl_apply_firmware(tp);
@@ -3463,6 +3493,8 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ r810x_aldps_enable(tp);
}
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3475,9 +3507,7 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(100);
+ r810x_aldps_disable(tp);
rtl_apply_firmware(tp);
@@ -3485,6 +3515,8 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+ r810x_aldps_enable(tp);
}
static void rtl_hw_phy_config(struct net_device *dev)
@@ -5015,8 +5047,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
- rtl_disable_clock_request(pdev);
-
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
@@ -5025,7 +5055,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5050,13 +5081,12 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
- rtl_disable_clock_request(pdev);
-
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
+ RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5113,8 +5143,10 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+ RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
+ RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5330,6 +5362,9 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
@@ -5355,6 +5390,9 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+ RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5376,7 +5414,10 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
- RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+ RTL_W32(MISC,
+ (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
+ RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
@@ -5774,7 +5815,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->tx_skb[entry].len = len;
txd->addr = cpu_to_le64(mapping);
- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
opts[0] = DescOwn;
rtl8169_tso_csum(tp, skb, opts);
@@ -6992,15 +7033,4 @@ static struct pci_driver rtl8169_pci_driver = {
.driver.pm = RTL8169_PM_OPS,
};
-static int __init rtl8169_init_module(void)
-{
- return pci_register_driver(&rtl8169_pci_driver);
-}
-
-static void __exit rtl8169_cleanup_module(void)
-{
- pci_unregister_driver(&rtl8169_pci_driver);
-}
-
-module_init(rtl8169_init_module);
-module_exit(rtl8169_cleanup_module);
+module_pci_driver(rtl8169_pci_driver);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c8bfea0..3d70586 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2286,7 +2286,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
for (i = 0; i < PHY_MAX_ADDR; i++)
mdp->mii_bus->irq[i] = PHY_POLL;
- /* regist mdio bus */
+ /* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
if (ret)
goto out_free_irq;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 25906c1..3ab2c428 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -5,6 +5,7 @@ config SFC
select CRC32
select I2C
select I2C_ALGOBIT
+ select PTP_1588_CLOCK
---help---
This driver supports 10-gigabit Ethernet cards based on
the Solarflare SFC4000 and SFC9000-family controllers.
@@ -34,10 +35,3 @@ config SFC_SRIOV
This enables support for the SFC9000 I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
-config SFC_PTP
- bool "Solarflare SFC9000-family PTP support"
- depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
- default y
- ---help---
- This enables support for the Precision Time Protocol (PTP)
- on SFC9000-family NICs
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index e11f2ec..945bf06 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -2,9 +2,8 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
falcon_xmac.o mcdi_mac.o \
selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
- mcdi.o mcdi_phy.o mcdi_mon.o
+ mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
-sfc-$(CONFIG_SFC_PTP) += ptp.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 576a310..2487f58 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -868,9 +868,7 @@ struct efx_nic {
struct work_struct peer_work;
#endif
-#ifdef CONFIG_SFC_PTP
struct efx_ptp_data *ptp_data;
-#endif
/* The following fields may be written more often */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 438cef1..7a9647a 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -252,7 +252,6 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
struct ethtool_ts_info;
-#ifdef CONFIG_SFC_PTP
extern void efx_ptp_probe(struct efx_nic *efx);
extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
extern int efx_ptp_get_ts_info(struct net_device *net_dev,
@@ -260,31 +259,6 @@ extern int efx_ptp_get_ts_info(struct net_device *net_dev,
extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-#else
-static inline void efx_ptp_probe(struct efx_nic *efx) {}
-static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
-{
- return -EOPNOTSUPP;
-}
-static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
-{
- ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE);
- ts_info->phc_index = -1;
-
- return 0;
-}
-static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
-{
- return false;
-}
-static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
-{
- return NETDEV_TX_OK;
-}
-static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
-#endif
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 2c41894..48fcb5e 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -60,6 +60,14 @@ config TI_CPSW
To compile this driver as a module, choose M here: the module
will be called cpsw.
+config TI_CPTS
+ boolean "TI Common Platform Time Sync (CPTS) Support"
+ select PTP_1588_CLOCK
+ ---help---
+ This driver supports the Common Platform Time Sync unit of
+ the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
+ and Layer 2 packets, and the driver offers a PTP Hardware Clock.
+
config TLAN
tristate "TI ThunderLAN support"
depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 91bd8bb..c65148e 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw_ale.o cpsw.o
+ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index df55e24..02c2477 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -24,6 +24,7 @@
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
@@ -35,6 +36,7 @@
#include <linux/platform_data/cpsw.h>
#include "cpsw_ale.h"
+#include "cpts.h"
#include "davinci_cpdma.h"
#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
@@ -70,10 +72,37 @@ do { \
dev_notice(priv->dev, format, ## __VA_ARGS__); \
} while (0)
+#define ALE_ALL_PORTS 0x7
+
#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
+#define CPSW_VERSION_1 0x19010a
+#define CPSW_VERSION_2 0x19010c
+
+#define HOST_PORT_NUM 0
+#define SLIVER_SIZE 0x40
+
+#define CPSW1_HOST_PORT_OFFSET 0x028
+#define CPSW1_SLAVE_OFFSET 0x050
+#define CPSW1_SLAVE_SIZE 0x040
+#define CPSW1_CPDMA_OFFSET 0x100
+#define CPSW1_STATERAM_OFFSET 0x200
+#define CPSW1_CPTS_OFFSET 0x500
+#define CPSW1_ALE_OFFSET 0x600
+#define CPSW1_SLIVER_OFFSET 0x700
+
+#define CPSW2_HOST_PORT_OFFSET 0x108
+#define CPSW2_SLAVE_OFFSET 0x200
+#define CPSW2_SLAVE_SIZE 0x100
+#define CPSW2_CPDMA_OFFSET 0x800
+#define CPSW2_STATERAM_OFFSET 0xa00
+#define CPSW2_CPTS_OFFSET 0xc00
+#define CPSW2_ALE_OFFSET 0xd00
+#define CPSW2_SLIVER_OFFSET 0xd80
+#define CPSW2_BD_OFFSET 0x2000
+
#define CPDMA_RXTHRESH 0x0c0
#define CPDMA_RXFREE 0x0e0
#define CPDMA_TXHDP 0x00
@@ -81,21 +110,6 @@ do { \
#define CPDMA_TXCP 0x40
#define CPDMA_RXCP 0x60
-#define cpsw_dma_regs(base, offset) \
- (void __iomem *)((base) + (offset))
-#define cpsw_dma_rxthresh(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
-#define cpsw_dma_rxfree(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_RXFREE)
-#define cpsw_dma_txhdp(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_TXHDP)
-#define cpsw_dma_rxhdp(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_RXHDP)
-#define cpsw_dma_txcp(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_TXCP)
-#define cpsw_dma_rxcp(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_RXCP)
-
#define CPSW_POLL_WEIGHT 64
#define CPSW_MIN_PACKET_SIZE 60
#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
@@ -129,7 +143,7 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
-struct cpsw_ss_regs {
+struct cpsw_wr_regs {
u32 id_ver;
u32 soft_reset;
u32 control;
@@ -140,26 +154,98 @@ struct cpsw_ss_regs {
u32 misc_en;
};
-struct cpsw_regs {
+struct cpsw_ss_regs {
u32 id_ver;
u32 control;
u32 soft_reset;
u32 stat_port_en;
u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+ u32 vlan_ltype;
+ u32 ts_ltype;
+ u32 dlr_ltype;
};
-struct cpsw_slave_regs {
- u32 max_blks;
- u32 blk_cnt;
- u32 flow_thresh;
- u32 port_vlan;
- u32 tx_pri_map;
- u32 ts_ctl;
- u32 ts_seq_ltype;
- u32 ts_vlan;
- u32 sa_lo;
- u32 sa_hi;
-};
+/* CPSW_PORT_V1 */
+#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
+#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
+#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
+#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
+#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
+#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
+#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
+
+/* CPSW_PORT_V2 */
+#define CPSW2_CONTROL 0x00 /* Control Register */
+#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
+#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
+#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
+#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
+#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
+
+/* CPSW_PORT_V1 and V2 */
+#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
+#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
+#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
+
+/* CPSW_PORT_V2 only */
+#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
+
+/* Bit definitions for the CPSW2_CONTROL register */
+#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
+#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
+#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
+#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
+#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
+#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
+
+#define CTRL_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
+ TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
+#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
+
+/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
+#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
+#define TS_SEQ_ID_OFFSET_MASK (0x3f)
+#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
+#define TS_MSG_TYPE_EN_MASK (0xffff)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
+
+/* Bit definitions for the CPSW1_TS_CTL register */
+#define CPSW_V1_TS_RX_EN BIT(0)
+#define CPSW_V1_TS_TX_EN BIT(4)
+#define CPSW_V1_MSG_TYPE_OFS 16
+
+/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
struct cpsw_host_regs {
u32 max_blks;
@@ -185,7 +271,7 @@ struct cpsw_sliver_regs {
};
struct cpsw_slave {
- struct cpsw_slave_regs __iomem *regs;
+ void __iomem *regs;
struct cpsw_sliver_regs __iomem *sliver;
int slave_num;
u32 mac_control;
@@ -193,19 +279,30 @@ struct cpsw_slave {
struct phy_device *phy;
};
+static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+{
+ return __raw_readl(slave->regs + offset);
+}
+
+static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
+{
+ __raw_writel(val, slave->regs + offset);
+}
+
struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
struct resource *cpsw_res;
- struct resource *cpsw_ss_res;
+ struct resource *cpsw_wr_res;
struct napi_struct napi;
struct device *dev;
struct cpsw_platform_data data;
- struct cpsw_regs __iomem *regs;
- struct cpsw_ss_regs __iomem *ss_regs;
+ struct cpsw_ss_regs __iomem *regs;
+ struct cpsw_wr_regs __iomem *wr_regs;
struct cpsw_host_regs __iomem *host_port_regs;
u32 msg_enable;
+ u32 version;
struct net_device_stats stats;
int rx_packet_max;
int host_port;
@@ -218,6 +315,7 @@ struct cpsw_priv {
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
+ struct cpts cpts;
};
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
@@ -228,10 +326,34 @@ struct cpsw_priv {
(func)((priv)->slaves + idx, ##arg); \
} while (0)
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ dev_err(priv->dev, "Ignoring Promiscuous mode\n");
+ return;
+ }
+
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+
+ if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ /* program multicast address list into ALE register */
+ netdev_for_each_mc_addr(ha, ndev) {
+ cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
+ ALE_ALL_PORTS << priv->host_port, 0, 0);
+ }
+ }
+}
+
static void cpsw_intr_enable(struct cpsw_priv *priv)
{
- __raw_writel(0xFF, &priv->ss_regs->tx_en);
- __raw_writel(0xFF, &priv->ss_regs->rx_en);
+ __raw_writel(0xFF, &priv->wr_regs->tx_en);
+ __raw_writel(0xFF, &priv->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(priv->dma, true);
return;
@@ -239,8 +361,8 @@ static void cpsw_intr_enable(struct cpsw_priv *priv)
static void cpsw_intr_disable(struct cpsw_priv *priv)
{
- __raw_writel(0, &priv->ss_regs->tx_en);
- __raw_writel(0, &priv->ss_regs->rx_en);
+ __raw_writel(0, &priv->wr_regs->tx_en);
+ __raw_writel(0, &priv->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(priv->dma, false);
return;
@@ -254,6 +376,7 @@ void cpsw_tx_handler(void *token, int len, int status)
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
+ cpts_tx_timestamp(&priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -274,6 +397,7 @@ void cpsw_rx_handler(void *token, int len, int status)
}
if (likely(status >= 0)) {
skb_put(skb, len);
+ cpts_rx_timestamp(&priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
priv->stats.rx_bytes += len;
@@ -359,8 +483,8 @@ static inline void soft_reset(const char *module, void __iomem *reg)
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
struct cpsw_priv *priv)
{
- __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
- __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
+ slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+ slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
static void _cpsw_adjust_link(struct cpsw_slave *slave,
@@ -446,7 +570,15 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
/* setup priority mapping */
__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
- __raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map);
+
+ switch (priv->version) {
+ case CPSW_VERSION_1:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ break;
+ case CPSW_VERSION_2:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ break;
+ }
/* setup max packet size, and mac address */
__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
@@ -505,7 +637,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
pm_runtime_get_sync(&priv->pdev->dev);
- reg = __raw_readl(&priv->regs->id_ver);
+ reg = priv->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
@@ -566,12 +698,12 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
- cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
- cpdma_ctlr_stop(priv->dma);
netif_stop_queue(priv->ndev);
napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_stop(priv->dma);
cpsw_ale_stop(priv->ale);
for_each_slave(priv, cpsw_slave_stop, priv);
pm_runtime_put_sync(&priv->pdev->dev);
@@ -592,6 +724,11 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_tx_timestamp(skb);
+
ret = cpdma_chan_submit(priv->txch, skb, skb->data,
skb->len, GFP_KERNEL);
if (unlikely(ret != 0)) {
@@ -629,6 +766,130 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
}
+#ifdef CONFIG_TI_CPTS
+
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+{
+ struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ u32 ts_en, seq_id;
+
+ if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+ slave_write(slave, 0, CPSW1_TS_CTL);
+ return;
+ }
+
+ seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+ if (priv->cpts.tx_enable)
+ ts_en |= CPSW_V1_TS_TX_EN;
+
+ if (priv->cpts.rx_enable)
+ ts_en |= CPSW_V1_TS_RX_EN;
+
+ slave_write(slave, ts_en, CPSW1_TS_CTL);
+ slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+ struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ u32 ctrl, mtype;
+
+ ctrl = slave_read(slave, CPSW2_CONTROL);
+ ctrl &= ~CTRL_ALL_TS_MASK;
+
+ if (priv->cpts.tx_enable)
+ ctrl |= CTRL_TX_TS_BITS;
+
+ if (priv->cpts.rx_enable)
+ ctrl |= CTRL_RX_TS_BITS;
+
+ mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+ slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+ slave_write(slave, ctrl, CPSW2_CONTROL);
+ __raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
+}
+
+static int cpsw_hwtstamp_ioctl(struct cpsw_priv *priv, struct ifreq *ifr)
+{
+ struct cpts *cpts = &priv->cpts;
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ cpts->tx_enable = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ cpts->tx_enable = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ cpts->rx_enable = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cpts->rx_enable = 1;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (priv->version) {
+ case CPSW_VERSION_1:
+ cpsw_hwtstamp_v1(priv);
+ break;
+ case CPSW_VERSION_2:
+ cpsw_hwtstamp_v2(priv);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+#endif /*CONFIG_TI_CPTS*/
+
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+#ifdef CONFIG_TI_CPTS
+ if (cmd == SIOCSHWTSTAMP)
+ return cpsw_hwtstamp_ioctl(priv, req);
+#endif
+ return -ENOTSUPP;
+}
+
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -669,10 +930,12 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
+ .ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_get_stats = cpsw_ndo_get_stats,
+ .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
@@ -699,22 +962,56 @@ static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
priv->msg_enable = value;
}
+static int cpsw_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+#ifdef CONFIG_TI_CPTS
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = priv->cpts.phc_index;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+#else
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ info->tx_types = 0;
+ info->rx_filters = 0;
+#endif
+ return 0;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_ts_info = cpsw_get_ts_info,
};
-static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
+ u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
void __iomem *regs = priv->regs;
int slave_num = slave->slave_num;
struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
slave->data = data;
- slave->regs = regs + data->slave_reg_ofs;
- slave->sliver = regs + data->sliver_reg_ofs;
+ slave->regs = regs + slave_reg_ofs;
+ slave->sliver = regs + sliver_reg_ofs;
}
static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -734,49 +1031,40 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->slaves = prop;
- data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
- data->slaves, GFP_KERNEL);
- if (!data->slave_data) {
- pr_err("Could not allocate slave memory.\n");
- return -EINVAL;
- }
-
- data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
-
- if (of_property_read_u32(node, "cpdma_channels", &prop)) {
- pr_err("Missing cpdma_channels property in the DT.\n");
+ if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
+ pr_err("Missing cpts_active_slave property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->channels = prop;
+ data->cpts_active_slave = prop;
- if (of_property_read_u32(node, "host_port_no", &prop)) {
- pr_err("Missing host_port_no property in the DT.\n");
+ if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
+ pr_err("Missing cpts_clock_mult property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->host_port_num = prop;
+ data->cpts_clock_mult = prop;
- if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
- pr_err("Missing cpdma_reg_ofs property in the DT.\n");
+ if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
+ pr_err("Missing cpts_clock_shift property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->cpdma_reg_ofs = prop;
+ data->cpts_clock_shift = prop;
- if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
- pr_err("Missing cpdma_sram_ofs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
+ data->slaves, GFP_KERNEL);
+ if (!data->slave_data) {
+ pr_err("Could not allocate slave memory.\n");
+ return -EINVAL;
}
- data->cpdma_sram_ofs = prop;
- if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
- pr_err("Missing ale_reg_ofs property in the DT.\n");
+ if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+ pr_err("Missing cpdma_channels property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->ale_reg_ofs = prop;
+ data->channels = prop;
if (of_property_read_u32(node, "ale_entries", &prop)) {
pr_err("Missing ale_entries property in the DT.\n");
@@ -785,27 +1073,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->ale_entries = prop;
- if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
- pr_err("Missing host_port_reg_ofs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- data->host_port_reg_ofs = prop;
-
- if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
- pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- data->hw_stats_reg_ofs = prop;
-
- if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
- pr_err("Missing bd_ram_ofs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- data->bd_ram_ofs = prop;
-
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
pr_err("Missing bd_ram_size property in the DT.\n");
ret = -EINVAL;
@@ -827,33 +1094,34 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
- for_each_child_of_node(node, slave_node) {
+ /*
+ * Populate all the child nodes here...
+ */
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ /* We do not want to force this, as in some cases we may not have child nodes */
+ if (ret)
+ pr_warn("Doesn't have any child node\n");
+
+ for_each_node_by_name(slave_node, "slave") {
struct cpsw_slave_data *slave_data = data->slave_data + i;
- const char *phy_id = NULL;
const void *mac_addr = NULL;
-
- if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
+ u32 phyid;
+ int lenp;
+ const __be32 *parp;
+ struct device_node *mdio_node;
+ struct platform_device *mdio;
+
+ parp = of_get_property(slave_node, "phy_id", &lenp);
+ if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
pr_err("Missing slave[%d] phy_id property\n", i);
ret = -EINVAL;
goto error_ret;
}
- slave_data->phy_id = phy_id;
-
- if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
- pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
- ret = -EINVAL;
- goto error_ret;
- }
- slave_data->slave_reg_ofs = prop;
-
- if (of_property_read_u32(slave_node, "sliver_reg_ofs",
- &prop)) {
- pr_err("Missing slave[%d] sliver_reg_ofs property\n",
- i);
- ret = -EINVAL;
- goto error_ret;
- }
- slave_data->sliver_reg_ofs = prop;
+ mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
+ phyid = be32_to_cpup(parp+1);
+ mdio = of_find_device_by_node(mdio_node);
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
@@ -876,8 +1144,9 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
struct cpsw_priv *priv;
struct cpdma_params dma_params;
struct cpsw_ale_params ale_params;
- void __iomem *regs;
+ void __iomem *ss_regs, *wr_regs;
struct resource *res;
+ u32 slave_offset, sliver_offset, slave_size;
int ret = 0, i, k = 0;
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
@@ -895,6 +1164,11 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
+ /*
+ * This may be required here for child devices.
+ */
+ pm_runtime_enable(&pdev->dev);
+
if (cpsw_probe_dt(&priv->data, pdev)) {
pr_err("cpsw: platform data missing\n");
ret = -ENODEV;
@@ -921,7 +1195,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
- pm_runtime_enable(&pdev->dev);
priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "fck is not found\n");
@@ -935,63 +1208,86 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
ret = -ENOENT;
goto clean_clk_ret;
}
-
if (!request_mem_region(priv->cpsw_res->start,
resource_size(priv->cpsw_res), ndev->name)) {
dev_err(priv->dev, "failed request i/o region\n");
ret = -ENXIO;
goto clean_clk_ret;
}
-
- regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
- if (!regs) {
+ ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
+ if (!ss_regs) {
dev_err(priv->dev, "unable to map i/o region\n");
goto clean_cpsw_iores_ret;
}
- priv->regs = regs;
- priv->host_port = data->host_port_num;
- priv->host_port_regs = regs + data->host_port_reg_ofs;
+ priv->regs = ss_regs;
+ priv->version = __raw_readl(&priv->regs->id_ver);
+ priv->host_port = HOST_PORT_NUM;
- priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!priv->cpsw_ss_res) {
+ priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!priv->cpsw_wr_res) {
dev_err(priv->dev, "error getting i/o resource\n");
ret = -ENOENT;
- goto clean_clk_ret;
+ goto clean_iomap_ret;
}
-
- if (!request_mem_region(priv->cpsw_ss_res->start,
- resource_size(priv->cpsw_ss_res), ndev->name)) {
+ if (!request_mem_region(priv->cpsw_wr_res->start,
+ resource_size(priv->cpsw_wr_res), ndev->name)) {
dev_err(priv->dev, "failed request i/o region\n");
ret = -ENXIO;
- goto clean_clk_ret;
+ goto clean_iomap_ret;
}
-
- regs = ioremap(priv->cpsw_ss_res->start,
- resource_size(priv->cpsw_ss_res));
- if (!regs) {
+ wr_regs = ioremap(priv->cpsw_wr_res->start,
+ resource_size(priv->cpsw_wr_res));
+ if (!wr_regs) {
dev_err(priv->dev, "unable to map i/o region\n");
- goto clean_cpsw_ss_iores_ret;
+ goto clean_cpsw_wr_iores_ret;
}
- priv->ss_regs = regs;
-
- for_each_slave(priv, cpsw_slave_init, priv);
+ priv->wr_regs = wr_regs;
memset(&dma_params, 0, sizeof(dma_params));
+ memset(&ale_params, 0, sizeof(ale_params));
+
+ switch (priv->version) {
+ case CPSW_VERSION_1:
+ priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+ priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET;
+ dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
+ slave_offset = CPSW1_SLAVE_OFFSET;
+ slave_size = CPSW1_SLAVE_SIZE;
+ sliver_offset = CPSW1_SLIVER_OFFSET;
+ dma_params.desc_mem_phys = 0;
+ break;
+ case CPSW_VERSION_2:
+ priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+ priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET;
+ dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
+ slave_offset = CPSW2_SLAVE_OFFSET;
+ slave_size = CPSW2_SLAVE_SIZE;
+ sliver_offset = CPSW2_SLIVER_OFFSET;
+ dma_params.desc_mem_phys =
+ (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+ break;
+ default:
+ dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
+ ret = -ENODEV;
+ goto clean_cpsw_wr_iores_ret;
+ }
+ for (i = 0; i < priv->data.slaves; i++) {
+ struct cpsw_slave *slave = &priv->slaves[i];
+ cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
+ slave_offset += slave_size;
+ sliver_offset += SLIVER_SIZE;
+ }
+
dma_params.dev = &pdev->dev;
- dma_params.dmaregs = cpsw_dma_regs((u32)priv->regs,
- data->cpdma_reg_ofs);
- dma_params.rxthresh = cpsw_dma_rxthresh((u32)priv->regs,
- data->cpdma_reg_ofs);
- dma_params.rxfree = cpsw_dma_rxfree((u32)priv->regs,
- data->cpdma_reg_ofs);
- dma_params.txhdp = cpsw_dma_txhdp((u32)priv->regs,
- data->cpdma_sram_ofs);
- dma_params.rxhdp = cpsw_dma_rxhdp((u32)priv->regs,
- data->cpdma_sram_ofs);
- dma_params.txcp = cpsw_dma_txcp((u32)priv->regs,
- data->cpdma_sram_ofs);
- dma_params.rxcp = cpsw_dma_rxcp((u32)priv->regs,
- data->cpdma_sram_ofs);
+ dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
+ dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
+ dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
+ dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
+ dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
dma_params.num_chan = data->channels;
dma_params.has_soft_reset = true;
@@ -999,16 +1295,13 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
dma_params.desc_mem_size = data->bd_ram_size;
dma_params.desc_align = 16;
dma_params.has_ext_regs = true;
- dma_params.desc_mem_phys = data->no_bd_ram ? 0 :
- (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
- dma_params.desc_hw_addr = data->hw_ram_addr ?
- data->hw_ram_addr : dma_params.desc_mem_phys ;
+ dma_params.desc_hw_addr = dma_params.desc_mem_phys;
priv->dma = cpdma_ctlr_create(&dma_params);
if (!priv->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
- goto clean_iomap_ret;
+ goto clean_wr_iomap_ret;
}
priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -1022,10 +1315,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
goto clean_dma_ret;
}
- memset(&ale_params, 0, sizeof(ale_params));
ale_params.dev = &ndev->dev;
- ale_params.ale_regs = (void *)((u32)priv->regs) +
- ((u32)data->ale_reg_ofs);
ale_params.ale_ageout = ale_ageout;
ale_params.ale_entries = data->ale_entries;
ale_params.ale_ports = data->slaves;
@@ -1072,6 +1362,10 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
goto clean_irq_ret;
}
+ if (cpts_register(&pdev->dev, &priv->cpts,
+ data->cpts_clock_mult, data->cpts_clock_shift))
+ dev_err(priv->dev, "error registering cpts device\n");
+
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
priv->cpsw_res->start, ndev->irq);
@@ -1085,11 +1379,13 @@ clean_dma_ret:
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
+clean_wr_iomap_ret:
+ iounmap(priv->wr_regs);
+clean_cpsw_wr_iores_ret:
+ release_mem_region(priv->cpsw_wr_res->start,
+ resource_size(priv->cpsw_wr_res));
clean_iomap_ret:
iounmap(priv->regs);
-clean_cpsw_ss_iores_ret:
- release_mem_region(priv->cpsw_ss_res->start,
- resource_size(priv->cpsw_ss_res));
clean_cpsw_iores_ret:
release_mem_region(priv->cpsw_res->start,
resource_size(priv->cpsw_res));
@@ -1111,6 +1407,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
pr_info("removing device");
platform_set_drvdata(pdev, NULL);
+ cpts_unregister(&priv->cpts);
free_irq(ndev->irq, priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
@@ -1119,8 +1416,9 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
iounmap(priv->regs);
release_mem_region(priv->cpsw_res->start,
resource_size(priv->cpsw_res));
- release_mem_region(priv->cpsw_ss_res->start,
- resource_size(priv->cpsw_ss_res));
+ iounmap(priv->wr_regs);
+ release_mem_region(priv->cpsw_wr_res->start,
+ resource_size(priv->cpsw_wr_res));
pm_runtime_disable(&pdev->dev);
clk_put(priv->clk);
kfree(priv->slaves);
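
As a usage illustration (not part of the cpsw patch itself), the new cpsw_hwtstamp_ioctl() path is driven from userspace through the standard SIOCSHWTSTAMP request; a minimal sketch, assuming the usual <linux/net_tstamp.h> definitions and an interface named eth0. Per the filter switch above, only the PTPv2 filters are accepted and the returned config is rewritten to HWTSTAMP_FILTER_PTP_V2_EVENT:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	/* cpsw accepts only the PTPv2 filters; PTPv1 L4 filters return -ERANGE */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}
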
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index ca0d48a..0e9ccc2 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include <linux/etherdevice.h>
#include "cpsw_ale.h"
@@ -211,10 +212,34 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
mask &= ~port_mask;
/* free if only remaining port is host port */
- if (mask == BIT(ale->params.ale_ports))
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
- else
+ if (mask)
cpsw_ale_set_port_mask(ale_entry, mask);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+}
+
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int ret, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ ret = cpsw_ale_get_entry_type(ale_entry);
+ if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+ continue;
+
+ if (cpsw_ale_get_mcast(ale_entry)) {
+ u8 addr[6];
+
+ cpsw_ale_get_addr(ale_entry, addr);
+ if (!is_broadcast_ether_addr(addr))
+ cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
+ }
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+ return 0;
}
static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index a95b37b..2bd09cb 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -80,6 +80,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
new file mode 100644
index 0000000..3377667
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -0,0 +1,427 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/err.h>
+#include <linux/if.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_classify.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <plat/clock.h>
+
+#include "cpts.h"
+
+#ifdef CONFIG_TI_CPTS
+
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
+#define cpts_read32(c, r) __raw_readl(&c->reg->r)
+#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
+
+static int event_expired(struct cpts_event *event)
+{
+ return time_after(jiffies, event->tmo);
+}
+
+static int event_type(struct cpts_event *event)
+{
+ return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+}
+
+static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
+{
+ u32 r = cpts_read32(cpts, intstat_raw);
+
+ if (r & TS_PEND_RAW) {
+ *high = cpts_read32(cpts, event_high);
+ *low = cpts_read32(cpts, event_low);
+ cpts_write32(cpts, EVENT_POP, event_pop);
+ return 0;
+ }
+ return -1;
+}
+
+/*
+ * Returns zero if matching event type was found.
+ */
+static int cpts_fifo_read(struct cpts *cpts, int match)
+{
+ int i, type = -1;
+ u32 hi, lo;
+ struct cpts_event *event;
+
+ for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
+ if (cpts_fifo_pop(cpts, &hi, &lo))
+ break;
+ if (list_empty(&cpts->pool)) {
+ pr_err("cpts: event pool is empty\n");
+ return -1;
+ }
+ event = list_first_entry(&cpts->pool, struct cpts_event, list);
+ event->tmo = jiffies + 2;
+ event->high = hi;
+ event->low = lo;
+ type = event_type(event);
+ switch (type) {
+ case CPTS_EV_PUSH:
+ case CPTS_EV_RX:
+ case CPTS_EV_TX:
+ list_del_init(&event->list);
+ list_add_tail(&event->list, &cpts->events);
+ break;
+ case CPTS_EV_ROLL:
+ case CPTS_EV_HALF:
+ case CPTS_EV_HW:
+ break;
+ default:
+ pr_err("cpts: unknown event type\n");
+ break;
+ }
+ if (type == match)
+ break;
+ }
+ return type == match ? 0 : -1;
+}
+
+static cycle_t cpts_systim_read(const struct cyclecounter *cc)
+{
+ u64 val = 0;
+ struct cpts_event *event;
+ struct list_head *this, *next;
+ struct cpts *cpts = container_of(cc, struct cpts, cc);
+
+ cpts_write32(cpts, TS_PUSH, ts_push);
+ if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
+ pr_err("cpts: unable to obtain a time stamp\n");
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (event_type(event) == CPTS_EV_PUSH) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ val = event->low;
+ break;
+ }
+ }
+
+ return val;
+}
+
+/* PTP clock operations */
+
+static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, mult;
+ int neg_adj = 0;
+ unsigned long flags;
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ mult = cpts->cc_mult;
+ adj = mult;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+
+ timecounter_read(&cpts->tc);
+
+ cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
+
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return 0;
+}
+
+static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ now = timecounter_read(&cpts->tc);
+ now += delta;
+ timecounter_init(&cpts->tc, &cpts->cc, now);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return 0;
+}
+
+static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ ns = timecounter_read(&cpts->tc);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+static int cpts_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ timecounter_init(&cpts->tc, &cpts->cc, ns);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return 0;
+}
+
+static int cpts_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info cpts_info = {
+ .owner = THIS_MODULE,
+ .name = "CTPS timer",
+ .max_adj = 1000000,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfreq = cpts_ptp_adjfreq,
+ .adjtime = cpts_ptp_adjtime,
+ .gettime = cpts_ptp_gettime,
+ .settime = cpts_ptp_settime,
+ .enable = cpts_ptp_enable,
+};
+
+static void cpts_overflow_check(struct work_struct *work)
+{
+ struct timespec ts;
+ struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
+
+ cpts_write32(cpts, CPTS_EN, control);
+ cpts_write32(cpts, TS_PEND_EN, int_enable);
+ cpts_ptp_gettime(&cpts->info, &ts);
+ pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+}
+
+#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
+
+static void cpts_clk_init(struct cpts *cpts)
+{
+ cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+ if (IS_ERR(cpts->refclk)) {
+ pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+ cpts->refclk = NULL;
+ return;
+ }
+ clk_enable(cpts->refclk);
+ cpts->freq = cpts->refclk->recalc(cpts->refclk);
+}
+
+static void cpts_clk_release(struct cpts *cpts)
+{
+ clk_disable(cpts->refclk);
+ clk_put(cpts->refclk);
+}
+
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+ u16 ts_seqid, u8 ts_msgtype)
+{
+ u16 *seqid;
+ unsigned int offset;
+ u8 *msgtype, *data = skb->data;
+
+ switch (ptp_class) {
+ case PTP_CLASS_V1_IPV4:
+ case PTP_CLASS_V2_IPV4:
+ offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ break;
+ case PTP_CLASS_V1_IPV6:
+ case PTP_CLASS_V2_IPV6:
+ offset = OFF_PTP6;
+ break;
+ case PTP_CLASS_V2_L2:
+ offset = ETH_HLEN;
+ break;
+ case PTP_CLASS_V2_VLAN:
+ offset = ETH_HLEN + VLAN_HLEN;
+ break;
+ default:
+ return 0;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return 0;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ msgtype = data + offset + OFF_PTP_CONTROL;
+ else
+ msgtype = data + offset;
+
+ seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+ return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
+}
+
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
+{
+ u64 ns = 0;
+ struct cpts_event *event;
+ struct list_head *this, *next;
+ unsigned int class = sk_run_filter(skb, ptp_filter);
+ unsigned long flags;
+ u16 seqid;
+ u8 mtype;
+
+ if (class == PTP_CLASS_NONE)
+ return 0;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ cpts_fifo_read(cpts, CPTS_EV_PUSH);
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (event_expired(event)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ continue;
+ }
+ mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+ seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+ if (ev_type == event_type(event) &&
+ cpts_match(skb, class, seqid, mtype)) {
+ ns = timecounter_cyc2time(&cpts->tc, event->low);
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ns;
+}
+
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ u64 ns;
+ struct skb_shared_hwtstamps *ssh;
+
+ if (!cpts->rx_enable)
+ return;
+ ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
+ if (!ns)
+ return;
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ u64 ns;
+ struct skb_shared_hwtstamps ssh;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ return;
+ ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
+ if (!ns)
+ return;
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+}
+
+#endif /*CONFIG_TI_CPTS*/
+
+int cpts_register(struct device *dev, struct cpts *cpts,
+ u32 mult, u32 shift)
+{
+#ifdef CONFIG_TI_CPTS
+ int err, i;
+ unsigned long flags;
+
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
+ pr_err("cpts: bad ptp filter\n");
+ return -EINVAL;
+ }
+ cpts->info = cpts_info;
+ cpts->clock = ptp_clock_register(&cpts->info, dev);
+ if (IS_ERR(cpts->clock)) {
+ err = PTR_ERR(cpts->clock);
+ cpts->clock = NULL;
+ return err;
+ }
+ spin_lock_init(&cpts->lock);
+
+ cpts->cc.read = cpts_systim_read;
+ cpts->cc.mask = CLOCKSOURCE_MASK(32);
+ cpts->cc_mult = mult;
+ cpts->cc.mult = mult;
+ cpts->cc.shift = shift;
+
+ INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->pool);
+ for (i = 0; i < CPTS_MAX_EVENTS; i++)
+ list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+ cpts_clk_init(cpts);
+ cpts_write32(cpts, CPTS_EN, control);
+ cpts_write32(cpts, TS_PEND_EN, int_enable);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
+ schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+
+ cpts->phc_index = ptp_clock_index(cpts->clock);
+#endif
+ return 0;
+}
+
+void cpts_unregister(struct cpts *cpts)
+{
+#ifdef CONFIG_TI_CPTS
+ if (cpts->clock) {
+ ptp_clock_unregister(cpts->clock);
+ cancel_delayed_work_sync(&cpts->overflow_work);
+ }
+ if (cpts->refclk)
+ cpts_clk_release(cpts);
+#endif
+}
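
For context on the adjfreq hunk above: cpts_ptp_adjfreq() scales the nominal cyclecounter multiplier by the requested parts-per-billion offset (diff = mult * ppb / 1e9, then mult plus or minus diff). The standalone userspace C sketch below reproduces that arithmetic so it can be compiled and tried in isolation; the sample multiplier is an arbitrary assumption and this is illustrative code only, not part of the driver.

/* Standalone sketch of the cpts_ptp_adjfreq() arithmetic (illustration only).
 * Build with: cc -o adjfreq_demo adjfreq_demo.c
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t adjust_mult(uint32_t nominal_mult, int32_t ppb)
{
	uint64_t adj;
	uint32_t diff;
	int neg_adj = 0;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	adj = (uint64_t)nominal_mult * (uint32_t)ppb;
	diff = (uint32_t)(adj / 1000000000ULL);	/* same result as div_u64() */

	return neg_adj ? nominal_mult - diff : nominal_mult + diff;
}

int main(void)
{
	uint32_t mult = 0x80000000u;	/* arbitrary nominal multiplier */

	printf("+100 ppb: %#x\n", adjust_mult(mult, 100));
	printf("-100 ppb: %#x\n", adjust_mult(mult, -100));
	return 0;
}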
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
new file mode 100644
index 0000000..e1bba3a
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -0,0 +1,146 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef _TI_CPTS_H_
+#define _TI_CPTS_H_
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clocksource.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/skbuff.h>
+
+struct cpsw_cpts {
+ u32 idver; /* Identification and version */
+ u32 control; /* Time sync control */
+ u32 res1;
+ u32 ts_push; /* Time stamp event push */
+ u32 ts_load_val; /* Time stamp load value */
+ u32 ts_load_en; /* Time stamp load enable */
+ u32 res2[2];
+ u32 intstat_raw; /* Time sync interrupt status raw */
+ u32 intstat_masked; /* Time sync interrupt status masked */
+ u32 int_enable; /* Time sync interrupt enable */
+ u32 res3;
+ u32 event_pop; /* Event interrupt pop */
+ u32 event_low; /* 32 Bit Event Time Stamp */
+ u32 event_high; /* Event Type Fields */
+};
+
+/* Bit definitions for the IDVER register */
+#define TX_IDENT_SHIFT (16) /* TX Identification Value */
+#define TX_IDENT_MASK (0xffff)
+#define RTL_VER_SHIFT (11) /* RTL Version Value */
+#define RTL_VER_MASK (0x1f)
+#define MAJOR_VER_SHIFT (8) /* Major Version Value */
+#define MAJOR_VER_MASK (0x7)
+#define MINOR_VER_SHIFT (0) /* Minor Version Value */
+#define MINOR_VER_MASK (0xff)
+
+/* Bit definitions for the CONTROL register */
+#define HW4_TS_PUSH_EN (1<<11) /* Hardware push 4 enable */
+#define HW3_TS_PUSH_EN (1<<10) /* Hardware push 3 enable */
+#define HW2_TS_PUSH_EN (1<<9) /* Hardware push 2 enable */
+#define HW1_TS_PUSH_EN (1<<8) /* Hardware push 1 enable */
+#define INT_TEST (1<<1) /* Interrupt Test */
+#define CPTS_EN (1<<0) /* Time Sync Enable */
+
+/*
+ * Definitions for the single-bit registers:
+ * TS_PUSH TS_LOAD_EN INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP
+ */
+#define TS_PUSH (1<<0) /* Time stamp event push */
+#define TS_LOAD_EN (1<<0) /* Time Stamp Load */
+#define TS_PEND_RAW (1<<0) /* int read (before enable) */
+#define TS_PEND (1<<0) /* masked interrupt read (after enable) */
+#define TS_PEND_EN (1<<0) /* masked interrupt enable */
+#define EVENT_POP (1<<0) /* writing discards one event */
+
+/* Bit definitions for the EVENT_HIGH register */
+#define PORT_NUMBER_SHIFT (24) /* Indicates Ethernet port or HW pin */
+#define PORT_NUMBER_MASK (0x1f)
+#define EVENT_TYPE_SHIFT (20) /* Time sync event type */
+#define EVENT_TYPE_MASK (0xf)
+#define MESSAGE_TYPE_SHIFT (16) /* PTP message type */
+#define MESSAGE_TYPE_MASK (0xf)
+#define SEQUENCE_ID_SHIFT (0) /* PTP message sequence ID */
+#define SEQUENCE_ID_MASK (0xffff)
+
+enum {
+ CPTS_EV_PUSH, /* Time Stamp Push Event */
+ CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+ CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+ CPTS_EV_HW, /* Hardware Time Stamp Push Event */
+ CPTS_EV_RX, /* Ethernet Receive Event */
+ CPTS_EV_TX, /* Ethernet Transmit Event */
+};
+
+/* This covers any input clock up to about 500 MHz. */
+#define CPTS_OVERFLOW_PERIOD (HZ * 8)
+
+#define CPTS_FIFO_DEPTH 16
+#define CPTS_MAX_EVENTS 32
+
+struct cpts_event {
+ struct list_head list;
+ unsigned long tmo;
+ u32 high;
+ u32 low;
+};
+
+struct cpts {
+ struct cpsw_cpts __iomem *reg;
+ int tx_enable;
+ int rx_enable;
+#ifdef CONFIG_TI_CPTS
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ spinlock_t lock; /* protects time registers */
+ u32 cc_mult; /* for the nominal frequency */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct delayed_work overflow_work;
+ int phc_index;
+ struct clk *refclk;
+ unsigned long freq;
+ struct list_head events;
+ struct list_head pool;
+ struct cpts_event pool_data[CPTS_MAX_EVENTS];
+#endif
+};
+
+#ifdef CONFIG_TI_CPTS
+extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+#else
+static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+#endif
+
+extern int cpts_register(struct device *dev, struct cpts *cpts,
+ u32 mult, u32 shift);
+extern void cpts_unregister(struct cpts *cpts);
+
+#endif
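
As a reading aid for the EVENT_HIGH layout defined above, the sketch below unpacks a FIFO word with the same shift/mask values, mirroring what event_type() and cpts_find_ts() do in cpts.c. The sample word is made up; this is a standalone illustration, not driver code.

/* Standalone sketch: decode the EVENT_HIGH fields of a CPTS FIFO word.
 * Build with: cc -o event_decode_demo event_decode_demo.c
 */
#include <stdint.h>
#include <stdio.h>

#define EVENT_TYPE_SHIFT	20
#define EVENT_TYPE_MASK		0xf
#define MESSAGE_TYPE_SHIFT	16
#define MESSAGE_TYPE_MASK	0xf
#define SEQUENCE_ID_SHIFT	0
#define SEQUENCE_ID_MASK	0xffff

int main(void)
{
	uint32_t event_high = 0x00501234;	/* hypothetical FIFO word */

	printf("event type   : %u\n",
	       (event_high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK);
	printf("message type : %u\n",
	       (event_high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK);
	printf("sequence id  : %u\n",
	       (event_high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK);
	return 0;
}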
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 51a96db..ae74280 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -465,7 +465,7 @@ static int davinci_mdio_resume(struct device *dev)
u32 ctrl;
spin_lock(&data->lock);
- pm_runtime_put_sync(data->dev);
+ pm_runtime_get_sync(data->dev);
/* restart the scan state machine */
ctrl = __raw_readl(&data->regs->control);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 961c832..72b775f 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -452,7 +452,7 @@ static size_t gelic_wl_synthesize_ie(u8 *buf,
if (rsn)
*buf++ = WLAN_EID_RSN;
else
- *buf++ = WLAN_EID_GENERIC;
+ *buf++ = WLAN_EID_VENDOR_SPECIFIC;
/* length filed; set later */
buf++;
@@ -540,7 +540,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
break;
switch (item_id) {
- case WLAN_EID_GENERIC:
+ case WLAN_EID_VENDOR_SPECIFIC:
if ((OUI_LEN + 1 <= item_len) &&
!memcmp(pos, wpa_oui, OUI_LEN) &&
pos[OUI_LEN] == 0x01) {
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 928148c..7fdeb52 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -363,11 +363,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
rndis_pkt = &msg->msg.pkt;
- /*
- * FIXME: Handle multiple rndis pkt msgs that maybe enclosed in this
- * netvsc packet (ie TotalDataBufferLength != MessageLength)
- */
-
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4b746d9..945360a 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -33,11 +33,7 @@
#define DRIVER_NAME "sh_irda"
-#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
-#define __IRDARAM_LEN 0x13FF
-#else
#define __IRDARAM_LEN 0x1039
-#endif
#define IRTMR 0x1F00 /* Transfer mode */
#define IRCFR 0x1F02 /* Configuration */
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index b332112..6989ebe 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -56,6 +56,10 @@ static char config[MAX_PARAM_LENGTH];
module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
+static bool oops_only = false;
+module_param(oops_only, bool, 0600);
+MODULE_PARM_DESC(oops_only, "Only log oops messages");
+
#ifndef MODULE
static int __init option_setup(char *opt)
{
@@ -683,6 +687,8 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
struct netconsole_target *nt;
const char *tmp;
+ if (oops_only && !oops_in_progress)
+ return;
/* Avoid taking lock and disabling interrupts unnecessarily */
if (list_empty(&target_list))
return;
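
Because oops_only is registered with module_param(..., 0600), it should also be adjustable at runtime through the usual /sys/module/<module>/parameters path. The userspace sketch below assumes that standard layout and simply writes "0" or "1" to the file; it is an illustration, not part of the patch.

/* Minimal sketch: toggle netconsole's oops_only parameter via sysfs.
 * Build with: cc -o oops_only_demo oops_only_demo.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/module/netconsole/parameters/oops_only";
	const char *val = (argc > 1 && argv[1][0] == '0') ? "0" : "1";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, val, 1) != 1)
		perror("write");
	close(fd);
	return 0;
}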
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 81c7bc0..383e833 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -150,18 +150,24 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = 0x00181b80,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c1ef300..044b532 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -431,10 +431,24 @@ static struct dev_pm_ops mdio_bus_pm_ops = {
#endif /* CONFIG_PM */
+static ssize_t
+phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
+}
+
+static struct device_attribute mdio_dev_attrs[] = {
+ __ATTR_RO(phy_id),
+ __ATTR_NULL
+};
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
.pm = MDIO_BUS_PM_OPS,
+ .dev_attrs = mdio_dev_attrs,
};
EXPORT_SYMBOL(mdio_bus_type);
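
The new phy_id attribute is registered for every device on the MDIO bus, so it can be read back from sysfs. The sketch below is a minimal userspace reader; the default device name it uses is a placeholder, since real names depend on the MDIO bus and PHY address.

/* Minimal sketch: read a PHY's phy_id attribute from sysfs.
 * Build with: cc -o phy_id_demo phy_id_demo.c
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *dev = (argc > 1) ? argv[1] : "0:00";	/* placeholder name */
	char path[256], id[32];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/mdio_bus/devices/%s/phy_id", dev);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(id, sizeof(id), f))
		printf("%s: %s", dev, id);
	fclose(f);
	return 0;
}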
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 88e3991..16dceed 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -56,35 +56,52 @@ static int smsc_phy_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt (phydev);
}
-static int lan87xx_config_init(struct phy_device *phydev)
+static int lan911x_config_init(struct phy_device *phydev)
{
- /*
- * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
- * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
- * to a bug on the chip.
- *
- * When the system is powered on with the network cable being
- * disconnected all the way until after ifconfig ethX up is
- * issued for the LAN port with this PHY, connecting the cable
- * afterwards does not cause LINK change detection, while the
- * expected behavior is the Link UP being detected.
- */
- int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
- if (rc < 0)
- return rc;
-
- rc &= ~MII_LAN83C185_EDPWRDOWN;
-
- rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
- if (rc < 0)
- return rc;
-
return smsc_phy_ack_interrupt(phydev);
}
-static int lan911x_config_init(struct phy_device *phydev)
+/*
+ * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
+ * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
+ * does not send the pulses within this interval, the PHY will remain powered
+ * down.
+ *
+ * This workaround will manually toggle the PHY on/off upon calls to read_status
+ * in order to generate link test pulses if the link is down. If a link partner
+ * is present, it will respond to the pulses, which will cause the ENERGYON bit
+ * to be set and will cause the EDPD mode to be exited.
+ */
+static int lan87xx_read_status(struct phy_device *phydev)
{
- return smsc_phy_ack_interrupt(phydev);
+ int err = genphy_read_status(phydev);
+
+ if (!phydev->link) {
+ /* Disable EDPD to wake up PHY */
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc & ~MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+
+ /* Sleep 64 ms to allow ~5 link test pulses to be sent */
+ msleep(64);
+
+ /* Re-enable EDPD */
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+ }
+
+ return err;
}
static struct phy_driver smsc_phy_driver[] = {
@@ -187,8 +204,8 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .config_init = lan87xx_config_init,
+ .read_status = lan87xx_read_status,
+ .config_intr = smsc_phy_config_intr,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index eb3f5ce..0b2706a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1034,7 +1034,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
-struct rtnl_link_stats64*
+static struct rtnl_link_stats64*
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
struct ppp *ppp = netdev_priv(dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0873cdc..b44d7b7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -68,7 +68,6 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
-#include <net/cls_cgroup.h>
#include <asm/uaccess.h>
@@ -110,16 +109,56 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
+/* 1024 is probably a high enough limit: modern hypervisors seem to support on
+ * the order of 100-200 CPUs so this leaves us some breathing space if we want
+ * to match a queue per guest CPU.
+ */
+#define MAX_TAP_QUEUES 1024
+
+#define TUN_FLOW_EXPIRE (3 * HZ)
+
+/* A tun_file connects an open character device to a tuntap netdevice. It
+ * also contains all socket-related structures (except sock_fprog and
+ * tap_filter) so that it can serve as one transmit queue for the tuntap
+ * device. The sock_fprog and tap_filter are kept in tun_struct since they
+ * are used for filtering on the netdevice, not on a specific queue (at
+ * least I didn't see the requirement for this).
+ *
+ * RCU usage:
+ * The tun_file and tun_struct are loosely coupled; the pointer from one to
+ * the other can only be read while rcu_read_lock or rtnl_lock is held.
+ */
struct tun_file {
- atomic_t count;
- struct tun_struct *tun;
+ struct sock sk;
+ struct socket socket;
+ struct socket_wq wq;
+ struct tun_struct __rcu *tun;
struct net *net;
+ struct fasync_struct *fasync;
+ /* only used for fasync */
+ unsigned int flags;
+ u16 queue_index;
+};
+
+struct tun_flow_entry {
+ struct hlist_node hash_link;
+ struct rcu_head rcu;
+ struct tun_struct *tun;
+
+ u32 rxhash;
+ int queue_index;
+ unsigned long updated;
};
-struct tun_sock;
+#define TUN_NUM_FLOW_ENTRIES 1024
+/* Since the socket was moved to tun_file, to preserve the behavior of a
+ * persistent device, the socket filter, sndbuf and vnet header size are
+ * restored when the file is attached to a persistent device.
+ */
struct tun_struct {
- struct tun_file *tfile;
+ struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
+ unsigned int numqueues;
unsigned int flags;
kuid_t owner;
kgid_t group;
@@ -128,88 +167,351 @@ struct tun_struct {
netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
NETIF_F_TSO6|NETIF_F_UFO)
- struct fasync_struct *fasync;
-
- struct tap_filter txflt;
- struct socket socket;
- struct socket_wq wq;
int vnet_hdr_sz;
-
+ int sndbuf;
+ struct tap_filter txflt;
+ struct sock_fprog fprog;
+ /* protected by rtnl lock */
+ bool filter_attached;
#ifdef TUN_DEBUG
int debug;
#endif
+ spinlock_t lock;
+ struct kmem_cache *flow_cache;
+ struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
+ struct timer_list flow_gc_timer;
+ unsigned long ageing_time;
};
-struct tun_sock {
- struct sock sk;
- struct tun_struct *tun;
-};
+static inline u32 tun_hashfn(u32 rxhash)
+{
+ return rxhash & 0x3ff;
+}
-static inline struct tun_sock *tun_sk(struct sock *sk)
+static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
- return container_of(sk, struct tun_sock, sk);
+ struct tun_flow_entry *e;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_rcu(e, n, head, hash_link) {
+ if (e->rxhash == rxhash)
+ return e;
+ }
+ return NULL;
}
-static int tun_attach(struct tun_struct *tun, struct file *file)
+static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
+ struct hlist_head *head,
+ u32 rxhash, u16 queue_index)
{
- struct tun_file *tfile = file->private_data;
- int err;
+ struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
+ GFP_ATOMIC);
+ if (e) {
+ tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
+ rxhash, queue_index);
+ e->updated = jiffies;
+ e->rxhash = rxhash;
+ e->queue_index = queue_index;
+ e->tun = tun;
+ hlist_add_head_rcu(&e->hash_link, head);
+ }
+ return e;
+}
- ASSERT_RTNL();
+static void tun_flow_free(struct rcu_head *head)
+{
+ struct tun_flow_entry *e
+ = container_of(head, struct tun_flow_entry, rcu);
+ kmem_cache_free(e->tun->flow_cache, e);
+}
- netif_tx_lock_bh(tun->dev);
+static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
+{
+ tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
+ e->rxhash, e->queue_index);
+ hlist_del_rcu(&e->hash_link);
+ call_rcu(&e->rcu, tun_flow_free);
+}
- err = -EINVAL;
- if (tfile->tun)
- goto out;
+static void tun_flow_flush(struct tun_struct *tun)
+{
+ int i;
- err = -EBUSY;
- if (tun->tfile)
- goto out;
+ spin_lock_bh(&tun->lock);
+ for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+ struct tun_flow_entry *e;
+ struct hlist_node *h, *n;
- err = 0;
- tfile->tun = tun;
- tun->tfile = tfile;
- tun->socket.file = file;
- netif_carrier_on(tun->dev);
- dev_hold(tun->dev);
- sock_hold(tun->socket.sk);
- atomic_inc(&tfile->count);
+ hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+ tun_flow_delete(tun, e);
+ }
+ spin_unlock_bh(&tun->lock);
+}
-out:
- netif_tx_unlock_bh(tun->dev);
- return err;
+static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
+{
+ int i;
+
+ spin_lock_bh(&tun->lock);
+ for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+ struct tun_flow_entry *e;
+ struct hlist_node *h, *n;
+
+ hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+ if (e->queue_index == queue_index)
+ tun_flow_delete(tun, e);
+ }
+ }
+ spin_unlock_bh(&tun->lock);
}
-static void __tun_detach(struct tun_struct *tun)
+static void tun_flow_cleanup(unsigned long data)
{
- /* Detach from net device */
- netif_tx_lock_bh(tun->dev);
- netif_carrier_off(tun->dev);
- tun->tfile = NULL;
- netif_tx_unlock_bh(tun->dev);
+ struct tun_struct *tun = (struct tun_struct *)data;
+ unsigned long delay = tun->ageing_time;
+ unsigned long next_timer = jiffies + delay;
+ unsigned long count = 0;
+ int i;
- /* Drop read queue */
- skb_queue_purge(&tun->socket.sk->sk_receive_queue);
+ tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
+
+ spin_lock_bh(&tun->lock);
+ for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+ struct tun_flow_entry *e;
+ struct hlist_node *h, *n;
+
+ hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+ unsigned long this_timer;
+ count++;
+ this_timer = e->updated + delay;
+ if (time_before_eq(this_timer, jiffies))
+ tun_flow_delete(tun, e);
+ else if (time_before(this_timer, next_timer))
+ next_timer = this_timer;
+ }
+ }
- /* Drop the extra count on the net device */
- dev_put(tun->dev);
+ if (count)
+ mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
+ spin_unlock_bh(&tun->lock);
+}
+
+static void tun_flow_update(struct tun_struct *tun, struct sk_buff *skb,
+ u16 queue_index)
+{
+ struct hlist_head *head;
+ struct tun_flow_entry *e;
+ unsigned long delay = tun->ageing_time;
+ u32 rxhash = skb_get_rxhash(skb);
+
+ if (!rxhash)
+ return;
+ else
+ head = &tun->flows[tun_hashfn(rxhash)];
+
+ rcu_read_lock();
+
+ if (tun->numqueues == 1)
+ goto unlock;
+
+ e = tun_flow_find(head, rxhash);
+ if (likely(e)) {
+ /* TODO: keep queueing to old queue until it's empty? */
+ e->queue_index = queue_index;
+ e->updated = jiffies;
+ } else {
+ spin_lock_bh(&tun->lock);
+ if (!tun_flow_find(head, rxhash))
+ tun_flow_create(tun, head, rxhash, queue_index);
+
+ if (!timer_pending(&tun->flow_gc_timer))
+ mod_timer(&tun->flow_gc_timer,
+ round_jiffies_up(jiffies + delay));
+ spin_unlock_bh(&tun->lock);
+ }
+
+unlock:
+ rcu_read_unlock();
+}
+
+/* We try to identify a flow through its rxhash first. The reason we do
+ * not check the rx queue number is that some cards (e.g. the 82599)
+ * choose the rxq based on the txq where the last packet of the flow was
+ * sent. As the userspace application moves between processors, we may
+ * see a different rxq number here. If we cannot get an rxhash, we fall
+ * back to the recorded rxq number in the hope that it helps.
+ */
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_flow_entry *e;
+ u32 txq = 0;
+ u32 numqueues = 0;
+
+ rcu_read_lock();
+ numqueues = tun->numqueues;
+
+ txq = skb_get_rxhash(skb);
+ if (txq) {
+ e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
+ if (e)
+ txq = e->queue_index;
+ else
+ /* use multiply and shift instead of expensive divide */
+ txq = ((u64)txq * numqueues) >> 32;
+ } else if (likely(skb_rx_queue_recorded(skb))) {
+ txq = skb_get_rx_queue(skb);
+ while (unlikely(txq >= numqueues))
+ txq -= numqueues;
+ }
+
+ rcu_read_unlock();
+ return txq;
+}
+
+static inline bool tun_not_capable(struct tun_struct *tun)
+{
+ const struct cred *cred = current_cred();
+
+ return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+ (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ !capable(CAP_NET_ADMIN);
+}
+
+static void tun_set_real_num_queues(struct tun_struct *tun)
+{
+ netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
+ netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
+}
+
+static void __tun_detach(struct tun_file *tfile, bool clean)
+{
+ struct tun_file *ntfile;
+ struct tun_struct *tun;
+ struct net_device *dev;
+
+ tun = rcu_dereference_protected(tfile->tun,
+ lockdep_rtnl_is_held());
+ if (tun) {
+ u16 index = tfile->queue_index;
+ BUG_ON(index >= tun->numqueues);
+ dev = tun->dev;
+
+ rcu_assign_pointer(tun->tfiles[index],
+ tun->tfiles[tun->numqueues - 1]);
+ rcu_assign_pointer(tfile->tun, NULL);
+ ntfile = rcu_dereference_protected(tun->tfiles[index],
+ lockdep_rtnl_is_held());
+ ntfile->queue_index = index;
+
+ --tun->numqueues;
+ sock_put(&tfile->sk);
+
+ synchronize_net();
+ tun_flow_delete_by_queue(tun, tun->numqueues + 1);
+ /* Drop read queue */
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_set_real_num_queues(tun);
+
+ if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
+ if (dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(dev);
+ }
+
+ if (clean) {
+ BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
+ &tfile->socket.flags));
+ sk_release_kernel(&tfile->sk);
+ }
}
-static void tun_detach(struct tun_struct *tun)
+static void tun_detach(struct tun_file *tfile, bool clean)
{
rtnl_lock();
- __tun_detach(tun);
+ __tun_detach(tfile, clean);
rtnl_unlock();
}
+static void tun_detach_all(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_file *tfile;
+ int i, n = tun->numqueues;
+
+ for (i = 0; i < n; i++) {
+ tfile = rcu_dereference_protected(tun->tfiles[i],
+ lockdep_rtnl_is_held());
+ BUG_ON(!tfile);
+ wake_up_all(&tfile->wq.wait);
+ rcu_assign_pointer(tfile->tun, NULL);
+ --tun->numqueues;
+ }
+ BUG_ON(tun->numqueues != 0);
+
+ synchronize_net();
+ for (i = 0; i < n; i++) {
+ tfile = rcu_dereference_protected(tun->tfiles[i],
+ lockdep_rtnl_is_held());
+ /* Drop read queue */
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ sock_put(&tfile->sk);
+ }
+}
+
+static int tun_attach(struct tun_struct *tun, struct file *file)
+{
+ struct tun_file *tfile = file->private_data;
+ int err;
+
+ err = -EINVAL;
+ if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
+ goto out;
+
+ err = -EBUSY;
+ if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
+ goto out;
+
+ err = -E2BIG;
+ if (tun->numqueues == MAX_TAP_QUEUES)
+ goto out;
+
+ err = 0;
+
+ /* Re-attach the filter to the persistent device */
+ if (tun->filter_attached == true) {
+ err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ if (!err)
+ goto out;
+ }
+ tfile->queue_index = tun->numqueues;
+ rcu_assign_pointer(tfile->tun, tun);
+ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ sock_hold(&tfile->sk);
+ tun->numqueues++;
+
+ tun_set_real_num_queues(tun);
+
+ if (tun->numqueues == 1)
+ netif_carrier_on(tun->dev);
+
+ /* device is allowed to go away first, so no need to hold extra
+ * refcnt.
+ */
+
+out:
+ return err;
+}
+
static struct tun_struct *__tun_get(struct tun_file *tfile)
{
- struct tun_struct *tun = NULL;
+ struct tun_struct *tun;
- if (atomic_inc_not_zero(&tfile->count))
- tun = tfile->tun;
+ rcu_read_lock();
+ tun = rcu_dereference(tfile->tun);
+ if (tun)
+ dev_hold(tun->dev);
+ rcu_read_unlock();
return tun;
}
@@ -221,10 +523,7 @@ static struct tun_struct *tun_get(struct file *file)
static void tun_put(struct tun_struct *tun)
{
- struct tun_file *tfile = tun->tfile;
-
- if (atomic_dec_and_test(&tfile->count))
- tun_detach(tfile->tun);
+ dev_put(tun->dev);
}
/* TAP filtering */
@@ -344,38 +643,20 @@ static const struct ethtool_ops tun_ethtool_ops;
/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
- struct tun_struct *tun = netdev_priv(dev);
- struct tun_file *tfile = tun->tfile;
-
- /* Inform the methods they need to stop using the dev.
- */
- if (tfile) {
- wake_up_all(&tun->wq.wait);
- if (atomic_dec_and_test(&tfile->count))
- __tun_detach(tun);
- }
-}
-
-static void tun_free_netdev(struct net_device *dev)
-{
- struct tun_struct *tun = netdev_priv(dev);
-
- BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
-
- sk_release_kernel(tun->socket.sk);
+ tun_detach_all(dev);
}
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
return 0;
}
@@ -383,28 +664,39 @@ static int tun_net_close(struct net_device *dev)
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ int txq = skb->queue_mapping;
+ struct tun_file *tfile;
- tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
+ rcu_read_lock();
+ tfile = rcu_dereference(tun->tfiles[txq]);
/* Drop packet if interface is not attached */
- if (!tun->tfile)
+ if (txq >= tun->numqueues)
goto drop;
+ tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
+
+ BUG_ON(!tfile);
+
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
* Filter can be enabled only for the TAP devices. */
if (!check_filter(&tun->txflt, skb))
goto drop;
- if (tun->socket.sk->sk_filter &&
- sk_filter(tun->socket.sk, skb))
+ if (tfile->socket.sk->sk_filter &&
+ sk_filter(tfile->socket.sk, skb))
goto drop;
- if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
+ /* Limit the number of packets queued by dividing the tx queue length by
+ * the number of queues.
+ */
+ if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
+ >= dev->tx_queue_len / tun->numqueues){
if (!(tun->flags & TUN_ONE_QUEUE)) {
/* Normal queueing mode. */
/* Packet scheduler handles dropping of further packets. */
- netif_stop_queue(dev);
+ netif_stop_subqueue(dev, txq);
/* We won't see all dropped packets individually, so overrun
* error is more appropriate. */
@@ -423,18 +715,22 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
/* Enqueue packet */
- skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
+ skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
/* Notify and wake up reader process */
- if (tun->flags & TUN_FASYNC)
- kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
+ if (tfile->flags & TUN_FASYNC)
+ kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+ wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
POLLRDNORM | POLLRDBAND);
+
+ rcu_read_unlock();
return NETDEV_TX_OK;
drop:
dev->stats.tx_dropped++;
+ skb_tx_error(skb);
kfree_skb(skb);
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -490,6 +786,7 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_start_xmit = tun_net_xmit,
.ndo_change_mtu = tun_net_change_mtu,
.ndo_fix_features = tun_net_fix_features,
+ .ndo_select_queue = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tun_poll_controller,
#endif
@@ -505,11 +802,43 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_rx_mode = tun_net_mclist,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_select_queue = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tun_poll_controller,
#endif
};
+static int tun_flow_init(struct tun_struct *tun)
+{
+ int i;
+
+ tun->flow_cache = kmem_cache_create("tun_flow_cache",
+ sizeof(struct tun_flow_entry), 0, 0,
+ NULL);
+ if (!tun->flow_cache)
+ return -ENOMEM;
+
+ for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
+ INIT_HLIST_HEAD(&tun->flows[i]);
+
+ tun->ageing_time = TUN_FLOW_EXPIRE;
+ setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+ mod_timer(&tun->flow_gc_timer,
+ round_jiffies_up(jiffies + tun->ageing_time));
+
+ return 0;
+}
+
+static void tun_flow_uninit(struct tun_struct *tun)
+{
+ del_timer_sync(&tun->flow_gc_timer);
+ tun_flow_flush(tun);
+
+ /* Wait for completion of call_rcu()'s */
+ rcu_barrier();
+ kmem_cache_destroy(tun->flow_cache);
+}
+
/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
@@ -546,7 +875,7 @@ static void tun_net_init(struct net_device *dev)
/* Character device part */
/* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
+static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
@@ -556,11 +885,11 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
if (!tun)
return POLLERR;
- sk = tun->socket.sk;
+ sk = tfile->socket.sk;
tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
- poll_wait(file, &tun->wq.wait, wait);
+ poll_wait(file, &tfile->wq.wait, wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -579,16 +908,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
/* prepad is the amount to reserve at front. len is length after that.
* linear is a hint as to how much to copy (usually headers). */
-static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
+static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
size_t prepad, size_t len,
size_t linear, int noblock)
{
- struct sock *sk = tun->socket.sk;
+ struct sock *sk = tfile->socket.sk;
struct sk_buff *skb;
int err;
- sock_update_classid(sk);
-
/* Under a page? Don't bother with paged skb. */
if (prepad + len < PAGE_SIZE || !linear)
linear = len;
@@ -685,9 +1012,9 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
}
/* Get packet from user space buffer */
-static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
- const struct iovec *iv, size_t total_len,
- size_t count, int noblock)
+static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ void *msg_control, const struct iovec *iv,
+ size_t total_len, size_t count, int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
@@ -757,7 +1084,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
} else
copylen = len;
- skb = tun_alloc_skb(tun, align, copylen, gso.hdr_len, noblock);
+ skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
tun->dev->stats.rx_dropped++;
@@ -854,6 +1181,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
+ tun_flow_update(tun, skb, tfile->queue_index);
return total_len;
}
@@ -862,6 +1190,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
{
struct file *file = iocb->ki_filp;
struct tun_struct *tun = tun_get(file);
+ struct tun_file *tfile = file->private_data;
ssize_t result;
if (!tun)
@@ -869,8 +1198,8 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
- result = tun_get_user(tun, NULL, iv, iov_length(iv, count), count,
- file->f_flags & O_NONBLOCK);
+ result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
+ count, file->f_flags & O_NONBLOCK);
tun_put(tun);
return result;
@@ -878,6 +1207,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
+ struct tun_file *tfile,
struct sk_buff *skb,
const struct iovec *iv, int len)
{
@@ -957,7 +1287,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
return total;
}
-static ssize_t tun_do_read(struct tun_struct *tun,
+static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct kiocb *iocb, const struct iovec *iv,
ssize_t len, int noblock)
{
@@ -968,12 +1298,12 @@ static ssize_t tun_do_read(struct tun_struct *tun,
tun_debug(KERN_INFO, tun, "tun_chr_read\n");
if (unlikely(!noblock))
- add_wait_queue(&tun->wq.wait, &wait);
+ add_wait_queue(&tfile->wq.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
- if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
+ if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
if (noblock) {
ret = -EAGAIN;
break;
@@ -991,16 +1321,16 @@ static ssize_t tun_do_read(struct tun_struct *tun,
schedule();
continue;
}
- netif_wake_queue(tun->dev);
+ netif_wake_subqueue(tun->dev, tfile->queue_index);
- ret = tun_put_user(tun, skb, iv, len);
+ ret = tun_put_user(tun, tfile, skb, iv, len);
kfree_skb(skb);
break;
}
current->state = TASK_RUNNING;
if (unlikely(!noblock))
- remove_wait_queue(&tun->wq.wait, &wait);
+ remove_wait_queue(&tfile->wq.wait, &wait);
return ret;
}
@@ -1021,13 +1351,22 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = tun_do_read(tun, tfile, iocb, iv, len,
+ file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
out:
tun_put(tun);
return ret;
}
+static void tun_free_netdev(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ tun_flow_uninit(tun);
+ free_netdev(dev);
+}
+
static void tun_setup(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -1056,7 +1395,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
static void tun_sock_write_space(struct sock *sk)
{
- struct tun_struct *tun;
+ struct tun_file *tfile;
wait_queue_head_t *wqueue;
if (!sock_writeable(sk))
@@ -1070,37 +1409,46 @@ static void tun_sock_write_space(struct sock *sk)
wake_up_interruptible_sync_poll(wqueue, POLLOUT |
POLLWRNORM | POLLWRBAND);
- tun = tun_sk(sk)->tun;
- kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
-}
-
-static void tun_sock_destruct(struct sock *sk)
-{
- free_netdev(tun_sk(sk)->tun->dev);
+ tfile = container_of(sk, struct tun_file, sk);
+ kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
- struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
- return tun_get_user(tun, m->msg_control, m->msg_iov, total_len,
- m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+ int ret;
+ struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+ struct tun_struct *tun = __tun_get(tfile);
+
+ if (!tun)
+ return -EBADFD;
+ ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
+ m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+ tun_put(tun);
+ return ret;
}
+
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len,
int flags)
{
- struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+ struct tun_struct *tun = __tun_get(tfile);
int ret;
+
+ if (!tun)
+ return -EBADFD;
+
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
+ ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
}
+ tun_put(tun);
return ret;
}
@@ -1121,7 +1469,7 @@ static const struct proto_ops tun_socket_ops = {
static struct proto tun_proto = {
.name = "tun",
.owner = THIS_MODULE,
- .obj_size = sizeof(struct tun_sock),
+ .obj_size = sizeof(struct tun_file),
};
static int tun_flags(struct tun_struct *tun)
@@ -1142,6 +1490,9 @@ static int tun_flags(struct tun_struct *tun)
if (tun->flags & TUN_VNET_HDR)
flags |= IFF_VNET_HDR;
+ if (tun->flags & TUN_TAP_MQ)
+ flags |= IFF_MULTI_QUEUE;
+
return flags;
}
@@ -1178,15 +1529,13 @@ static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
- struct sock *sk;
struct tun_struct *tun;
+ struct tun_file *tfile = file->private_data;
struct net_device *dev;
int err;
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
- const struct cred *cred = current_cred();
-
if (ifr->ifr_flags & IFF_TUN_EXCL)
return -EBUSY;
if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -1196,11 +1545,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
return -EINVAL;
- if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
- (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
- !capable(CAP_NET_ADMIN))
+ if (tun_not_capable(tun))
return -EPERM;
- err = security_tun_dev_attach(tun->socket.sk);
+ err = security_tun_dev_attach(tfile->socket.sk);
if (err < 0)
return err;
@@ -1233,8 +1580,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (*ifr->ifr_name)
name = ifr->ifr_name;
- dev = alloc_netdev(sizeof(struct tun_struct), name,
- tun_setup);
+ dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
+ tun_setup,
+ MAX_TAP_QUEUES, MAX_TAP_QUEUES);
if (!dev)
return -ENOMEM;
@@ -1246,46 +1594,35 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->flags = flags;
tun->txflt.count = 0;
tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
- set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
-
- err = -ENOMEM;
- sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
- if (!sk)
- goto err_free_dev;
- sk_change_net(sk, net);
- tun->socket.wq = &tun->wq;
- init_waitqueue_head(&tun->wq.wait);
- tun->socket.ops = &tun_socket_ops;
- sock_init_data(&tun->socket, sk);
- sk->sk_write_space = tun_sock_write_space;
- sk->sk_sndbuf = INT_MAX;
- sock_set_flag(sk, SOCK_ZEROCOPY);
+ tun->filter_attached = false;
+ tun->sndbuf = tfile->socket.sk->sk_sndbuf;
- tun_sk(sk)->tun = tun;
+ spin_lock_init(&tun->lock);
- security_tun_dev_post_create(sk);
+ security_tun_dev_post_create(&tfile->sk);
tun_net_init(dev);
+ if (tun_flow_init(tun))
+ goto err_free_dev;
+
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES;
dev->features = dev->hw_features;
err = register_netdevice(tun->dev);
if (err < 0)
- goto err_free_sk;
+ goto err_free_dev;
if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
pr_err("Failed to create tun sysfs files\n");
- sk->sk_destruct = tun_sock_destruct;
-
err = tun_attach(tun, file);
if (err < 0)
- goto failed;
+ goto err_free_dev;
}
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
@@ -1305,20 +1642,22 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
tun->flags &= ~TUN_VNET_HDR;
+ if (ifr->ifr_flags & IFF_MULTI_QUEUE)
+ tun->flags |= TUN_TAP_MQ;
+ else
+ tun->flags &= ~TUN_TAP_MQ;
+
/* Make sure persistent devices do not get stuck in
* xoff state.
*/
if (netif_running(tun->dev))
- netif_wake_queue(tun->dev);
+ netif_tx_wake_all_queues(tun->dev);
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
- err_free_sk:
- tun_free_netdev(dev);
err_free_dev:
free_netdev(dev);
- failed:
return err;
}
@@ -1373,13 +1712,91 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
return 0;
}
+static void tun_detach_filter(struct tun_struct *tun, int n)
+{
+ int i;
+ struct tun_file *tfile;
+
+ for (i = 0; i < n; i++) {
+ tfile = rcu_dereference_protected(tun->tfiles[i],
+ lockdep_rtnl_is_held());
+ sk_detach_filter(tfile->socket.sk);
+ }
+
+ tun->filter_attached = false;
+}
+
+static int tun_attach_filter(struct tun_struct *tun)
+{
+ int i, ret = 0;
+ struct tun_file *tfile;
+
+ for (i = 0; i < tun->numqueues; i++) {
+ tfile = rcu_dereference_protected(tun->tfiles[i],
+ lockdep_rtnl_is_held());
+ ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ if (ret) {
+ tun_detach_filter(tun, i);
+ return ret;
+ }
+ }
+
+ tun->filter_attached = true;
+ return ret;
+}
+
+static void tun_set_sndbuf(struct tun_struct *tun)
+{
+ struct tun_file *tfile;
+ int i;
+
+ for (i = 0; i < tun->numqueues; i++) {
+ tfile = rcu_dereference_protected(tun->tfiles[i],
+ lockdep_rtnl_is_held());
+ tfile->socket.sk->sk_sndbuf = tun->sndbuf;
+ }
+}
+
+static int tun_set_queue(struct file *file, struct ifreq *ifr)
+{
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun;
+ struct net_device *dev;
+ int ret = 0;
+
+ rtnl_lock();
+
+ if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
+ dev = __dev_get_by_name(tfile->net, ifr->ifr_name);
+ if (!dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ tun = netdev_priv(dev);
+ if (dev->netdev_ops != &tap_netdev_ops &&
+ dev->netdev_ops != &tun_netdev_ops)
+ ret = -EINVAL;
+ else if (tun_not_capable(tun))
+ ret = -EPERM;
+ else
+ ret = tun_attach(tun, file);
+ } else if (ifr->ifr_flags & IFF_DETACH_QUEUE)
+ __tun_detach(tfile, false);
+ else
+ ret = -EINVAL;
+
+unlock:
+ rtnl_unlock();
+ return ret;
+}
+
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
- struct sock_fprog fprog;
struct ifreq ifr;
kuid_t owner;
kgid_t group;
@@ -1387,7 +1804,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int vnet_hdr_sz;
int ret;
- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
+ if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
} else {
@@ -1398,10 +1815,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
* This is needed because we never checked for invalid flags on
* TUNSETIFF. */
return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
- IFF_VNET_HDR,
+ IFF_VNET_HDR | IFF_MULTI_QUEUE,
(unsigned int __user*)argp);
- }
+ } else if (cmd == TUNSETQUEUE)
+ return tun_set_queue(file, &ifr);
+ ret = 0;
rtnl_lock();
tun = __tun_get(tfile);
@@ -1422,7 +1841,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (!tun)
goto unlock;
- tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
+ tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
ret = 0;
switch (cmd) {
@@ -1444,11 +1863,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
case TUNSETPERSIST:
- /* Disable/Enable persist mode */
- if (arg)
+ /* Disable/Enable persist mode. Keep an extra reference to the
+ * module to prevent the module from being unloaded.
+ */
+ if (arg) {
tun->flags |= TUN_PERSIST;
- else
+ __module_get(THIS_MODULE);
+ } else {
tun->flags &= ~TUN_PERSIST;
+ module_put(THIS_MODULE);
+ }
tun_debug(KERN_INFO, tun, "persist %s\n",
arg ? "enabled" : "disabled");
@@ -1462,7 +1886,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
tun->owner = owner;
- tun_debug(KERN_INFO, tun, "owner set to %d\n",
+ tun_debug(KERN_INFO, tun, "owner set to %u\n",
from_kuid(&init_user_ns, tun->owner));
break;
@@ -1474,7 +1898,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
tun->group = group;
- tun_debug(KERN_INFO, tun, "group set to %d\n",
+ tun_debug(KERN_INFO, tun, "group set to %u\n",
from_kgid(&init_user_ns, tun->group));
break;
@@ -1526,7 +1950,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
case TUNGETSNDBUF:
- sndbuf = tun->socket.sk->sk_sndbuf;
+ sndbuf = tfile->socket.sk->sk_sndbuf;
if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
ret = -EFAULT;
break;
@@ -1537,7 +1961,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
- tun->socket.sk->sk_sndbuf = sndbuf;
+ tun->sndbuf = sndbuf;
+ tun_set_sndbuf(tun);
break;
case TUNGETVNETHDRSZ:
@@ -1565,10 +1990,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
ret = -EFAULT;
- if (copy_from_user(&fprog, argp, sizeof(fprog)))
+ if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
break;
- ret = sk_attach_filter(&fprog, tun->socket.sk);
+ ret = tun_attach_filter(tun);
break;
case TUNDETACHFILTER:
@@ -1576,7 +2001,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
- ret = sk_detach_filter(tun->socket.sk);
+ ret = 0;
+ tun_detach_filter(tun, tun->numqueues);
break;
default:
@@ -1628,27 +2054,21 @@ static long tun_chr_compat_ioctl(struct file *file,
static int tun_chr_fasync(int fd, struct file *file, int on)
{
- struct tun_struct *tun = tun_get(file);
+ struct tun_file *tfile = file->private_data;
int ret;
- if (!tun)
- return -EBADFD;
-
- tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
-
- if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
+ if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
goto out;
if (on) {
ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
if (ret)
goto out;
- tun->flags |= TUN_FASYNC;
+ tfile->flags |= TUN_FASYNC;
} else
- tun->flags &= ~TUN_FASYNC;
+ tfile->flags &= ~TUN_FASYNC;
ret = 0;
out:
- tun_put(tun);
return ret;
}
@@ -1658,44 +2078,39 @@ static int tun_chr_open(struct inode *inode, struct file * file)
DBG1(KERN_INFO, "tunX: tun_chr_open\n");
- tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+ tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
+ &tun_proto);
if (!tfile)
return -ENOMEM;
- atomic_set(&tfile->count, 0);
- tfile->tun = NULL;
+ rcu_assign_pointer(tfile->tun, NULL);
tfile->net = get_net(current->nsproxy->net_ns);
+ tfile->flags = 0;
+
+ rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
+ init_waitqueue_head(&tfile->wq.wait);
+
+ tfile->socket.file = file;
+ tfile->socket.ops = &tun_socket_ops;
+
+ sock_init_data(&tfile->socket, &tfile->sk);
+ sk_change_net(&tfile->sk, tfile->net);
+
+ tfile->sk.sk_write_space = tun_sock_write_space;
+ tfile->sk.sk_sndbuf = INT_MAX;
+
file->private_data = tfile;
+ set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
+
return 0;
}
static int tun_chr_close(struct inode *inode, struct file *file)
{
struct tun_file *tfile = file->private_data;
- struct tun_struct *tun;
+ struct net *net = tfile->net;
- tun = __tun_get(tfile);
- if (tun) {
- struct net_device *dev = tun->dev;
-
- tun_debug(KERN_INFO, tun, "tun_chr_close\n");
-
- __tun_detach(tun);
-
- /* If desirable, unregister the netdevice. */
- if (!(tun->flags & TUN_PERSIST)) {
- rtnl_lock();
- if (dev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(dev);
- rtnl_unlock();
- }
- }
-
- tun = tfile->tun;
- if (tun)
- sock_put(tun->socket.sk);
-
- put_net(tfile->net);
- kfree(tfile);
+ tun_detach(tfile, true);
+ put_net(net);
return 0;
}
@@ -1822,14 +2237,13 @@ static void tun_cleanup(void)
* holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
- struct tun_struct *tun;
+ struct tun_file *tfile;
if (file->f_op != &tun_fops)
return ERR_PTR(-EINVAL);
- tun = tun_get(file);
- if (!tun)
+ tfile = file->private_data;
+ if (!tfile)
return ERR_PTR(-EBADFD);
- tun_put(tun);
- return &tun->socket;
+ return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
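
Putting the tun multiqueue pieces together from a user's point of view: a tap device created with IFF_MULTI_QUEUE can have further queues attached by opening /dev/net/tun again with the same name, and individual queues can be taken in and out of service with TUNSETQUEUE plus IFF_DETACH_QUEUE/IFF_ATTACH_QUEUE. The sketch below assumes a linux/if_tun.h that already carries these new flag and ioctl definitions from this series, keeps error handling minimal, and detaches a secondary queue only (detaching the last queue of a non-persistent device would tear the device down).

/* Sketch of the multiqueue tap API from userspace (illustration only).
 * Build with: cc -o tap_mq_demo tap_mq_demo.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int open_queue(struct ifreq *ifr)
{
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0 || ioctl(fd, TUNSETIFF, ifr) < 0) {
		perror("TUNSETIFF");
		return -1;
	}
	return fd;
}

int main(void)
{
	struct ifreq ifr;
	int q0, q1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, "tapmq0", IFNAMSIZ - 1);

	q0 = open_queue(&ifr);		/* creates tapmq0, queue 0 */
	q1 = open_queue(&ifr);		/* same name: attaches queue 1 */
	if (q0 < 0 || q1 < 0)
		return 1;

	/* Temporarily remove queue 1 from service, then put it back. */
	ifr.ifr_flags = IFF_DETACH_QUEUE;
	if (ioctl(q1, TUNSETQUEUE, &ifr) < 0)
		perror("TUNSETQUEUE detach");
	ifr.ifr_flags = IFF_ATTACH_QUEUE;
	if (ioctl(q1, TUNSETQUEUE, &ifr) < 0)
		perror("TUNSETQUEUE attach");

	close(q1);
	close(q0);
	return 0;
}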
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c1ae769..ef97621 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -219,6 +219,24 @@ config USB_NET_CDC_NCM
* ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
* Ericsson F5521gw Mobile Broadband Module
+config USB_NET_CDC_MBIM
+ tristate "CDC MBIM support"
+ depends on USB_USBNET
+ select USB_WDM
+ select USB_NET_CDC_NCM
+ help
+ This driver provides support for CDC MBIM (Mobile Broadband
+ Interface Model) devices. The CDC MBIM specification is
+ available from <http://www.usb.org/>.
+
+ MBIM devices require configuration using the management
+ protocol defined by the MBIM specification. This driver
+ provides unfiltered access to the MBIM control channel
+ through the associated /dev/cdc-wdmx character device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cdc_mbim.
+
config USB_NET_DM9601
tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
depends on USB_USBNET
@@ -230,6 +248,8 @@ config USB_NET_DM9601
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
+ select BITREVERSE
+ select CRC16
select CRC32
help
This option adds support for SMSC LAN75XX based USB 2.0
@@ -238,6 +258,8 @@ config USB_NET_SMSC75XX
config USB_NET_SMSC95XX
tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
depends on USB_USBNET
+ select BITREVERSE
+ select CRC16
select CRC32
help
This option adds support for SMSC LAN95XX based USB 2.0
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index bf06300..4786913 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -31,4 +31,5 @@ obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
obj-$(CONFIG_USB_VL600) += lg-vl600.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
+obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 774d9ce..50d1673 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -25,121 +25,30 @@
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
- void *buf;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf)
- goto out;
-
- err = usb_control_msg(
- dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- buf,
- size,
- USB_CTRL_GET_TIMEOUT);
- if (err == size)
- memcpy(data, buf, size);
- else if (err >= 0)
- err = -EINVAL;
- kfree(buf);
+ int ret;
+ ret = usbnet_read_cmd(dev, cmd,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
-out:
- return err;
+ if (ret != size && ret >= 0)
+ return -EINVAL;
+ return ret;
}
int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
- void *buf = NULL;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- if (data) {
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- goto out;
- }
-
- err = usb_control_msg(
- dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- cmd,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- buf,
- size,
- USB_CTRL_SET_TIMEOUT);
- kfree(buf);
-
-out:
- return err;
-}
-
-static void asix_async_cmd_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
- status);
-
- kfree(req);
- usb_free_urb(urb);
+ return usbnet_write_cmd(dev, cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
}
void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
- struct usb_ctrlrequest *req;
- int status;
- struct urb *urb;
-
- netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
- return;
- }
-
- req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!req) {
- netdev_err(dev->net, "Failed to allocate memory for control request\n");
- usb_free_urb(urb);
- return;
- }
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- req->bRequest = cmd;
- req->wValue = cpu_to_le16(value);
- req->wIndex = cpu_to_le16(index);
- req->wLength = cpu_to_le16(size);
-
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (void *)req, data, size,
- asix_async_cmd_callback, req);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_err(dev->net, "Error submitting the control message: status=%d\n",
- status);
- kfree(req);
- usb_free_urb(urb);
- }
+ usbnet_write_cmd_async(dev, cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
}
int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
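[Editor's note] The asix conversion above is the template for most of the usbnet driver changes in this series: open-coded usb_control_msg() calls with hand-rolled bounce buffers are replaced by the usbnet_read_cmd()/usbnet_write_cmd()/usbnet_write_cmd_async() helpers, which take care of buffer handling and timeouts in the usbnet core. A hedged sketch of the shared pattern, with hypothetical names (example_read_reg, EXAMPLE_REQ_READ):

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

#define EXAMPLE_REQ_READ	0x01	/* hypothetical vendor request */

static int example_read_reg(struct usbnet *dev, u16 reg, u16 len, void *data)
{
	int ret;

	/* the core copies through its own DMA-safe buffer, so "data"
	 * may live on the caller's stack
	 */
	ret = usbnet_read_cmd(dev, EXAMPLE_REQ_READ,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, reg, data, len);
	if (ret >= 0 && ret != len)
		ret = -EINVAL;	/* treat short reads as errors */
	return ret;
}

The async variant (usbnet_write_cmd_async) submits a control URB and cleans up in its completion handler, which is why the converted drivers can call it from contexts where sleeping is not allowed.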
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
new file mode 100644
index 0000000..42f51c7
--- /dev/null
+++ b/drivers/net/usb/cdc_mbim.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2012 Smith Micro Software, Inc.
+ * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
+ *
+ * This driver is based on and reuse most of cdc_ncm, which is
+ * Copyright (C) ST-Ericsson 2010-2012
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc-wdm.h>
+#include <linux/usb/cdc_ncm.h>
+
+/* driver specific data - must match cdc_ncm usage */
+struct cdc_mbim_state {
+ struct cdc_ncm_ctx *ctx;
+ atomic_t pmcount;
+ struct usb_driver *subdriver;
+ struct usb_interface *control;
+ struct usb_interface *data;
+};
+
+/* using a counter to merge subdriver requests with our own into a combined state */
+static int cdc_mbim_manage_power(struct usbnet *dev, int on)
+{
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ int rv = 0;
+
+ dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+
+ if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
+ /* need autopm_get/put here to ensure the usbcore sees the new value */
+ rv = usb_autopm_get_interface(dev->intf);
+ if (rv < 0)
+ goto err;
+ dev->intf->needs_remote_wakeup = on;
+ usb_autopm_put_interface(dev->intf);
+ }
+err:
+ return rv;
+}
+
+static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ /* can be called while disconnecting */
+ if (!dev)
+ return 0;
+
+ return cdc_mbim_manage_power(dev, status);
+}
+
+
+static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct cdc_ncm_ctx *ctx;
+ struct usb_driver *subdriver = ERR_PTR(-ENODEV);
+ int ret = -ENODEV;
+ u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM;
+ struct cdc_mbim_state *info = (void *)&dev->data;
+
+ /* see if interface supports MBIM alternate setting */
+ if (intf->num_altsetting == 2) {
+ if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ CDC_NCM_COMM_ALTSETTING_MBIM);
+ data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
+ }
+
+ /* Probably NCM, defer for cdc_ncm_bind */
+ if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ goto err;
+
+ ret = cdc_ncm_bind_common(dev, intf, data_altsetting);
+ if (ret)
+ goto err;
+
+ ctx = info->ctx;
+
+ /* The MBIM descriptor and the status endpoint are required */
+ if (ctx->mbim_desc && dev->status)
+ subdriver = usb_cdc_wdm_register(ctx->control,
+ &dev->status->desc,
+ le16_to_cpu(ctx->mbim_desc->wMaxControlMessage),
+ cdc_mbim_wdm_manage_power);
+ if (IS_ERR(subdriver)) {
+ ret = PTR_ERR(subdriver);
+ cdc_ncm_unbind(dev, intf);
+ goto err;
+ }
+
+ /* can't let usbnet use the interrupt endpoint */
+ dev->status = NULL;
+ info->subdriver = subdriver;
+
+ /* MBIM cannot do ARP */
+ dev->net->flags |= IFF_NOARP;
+
+ /* no need to put the VLAN tci in the packet headers */
+ dev->net->features |= NETIF_F_HW_VLAN_TX;
+err:
+ return ret;
+}
+
+static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+
+ /* disconnect subdriver from control interface */
+ if (info->subdriver && info->subdriver->disconnect)
+ info->subdriver->disconnect(ctx->control);
+ info->subdriver = NULL;
+
+ /* let NCM unbind clean up both control and data interface */
+ cdc_ncm_unbind(dev, intf);
+}
+
+
+static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+ struct sk_buff *skb_out;
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+ __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
+ u16 tci = 0;
+ u8 *c;
+
+ if (!ctx)
+ goto error;
+
+ if (skb) {
+ if (skb->len <= ETH_HLEN)
+ goto error;
+
+ /* mapping VLANs to MBIM sessions:
+ * no tag => IPS session <0>
+ * 1 - 255 => IPS session <vlanid>
+ * 256 - 511 => DSS session <vlanid - 256>
+ * 512 - 4095 => unsupported, drop
+ */
+ vlan_get_tag(skb, &tci);
+
+ switch (tci & 0x0f00) {
+ case 0x0000: /* VLAN ID 0 - 255 */
+ /* verify that datagram is IPv4 or IPv6 */
+ skb_reset_mac_header(skb);
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ break;
+ default:
+ goto error;
+ }
+ c = (u8 *)&sign;
+ c[3] = tci;
+ break;
+ case 0x0100: /* VLAN ID 256 - 511 */
+ sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
+ c = (u8 *)&sign;
+ c[3] = tci;
+ break;
+ default:
+ netif_err(dev, tx_err, dev->net,
+ "unsupported tci=0x%04x\n", tci);
+ goto error;
+ }
+ skb_pull(skb, ETH_HLEN);
+ }
+
+ spin_lock_bh(&ctx->mtx);
+ skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
+ spin_unlock_bh(&ctx->mtx);
+ return skb_out;
+
+error:
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ return NULL;
+}
+
+static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
+{
+ __be16 proto = htons(ETH_P_802_3);
+ struct sk_buff *skb = NULL;
+
+ if (tci < 256) { /* IPS session? */
+ if (len < sizeof(struct iphdr))
+ goto err;
+
+ switch (*buf & 0xf0) {
+ case 0x40:
+ proto = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ goto err;
+ }
+ }
+
+ skb = netdev_alloc_skb_ip_align(dev->net, len + ETH_HLEN);
+ if (!skb)
+ goto err;
+
+ /* add an ethernet header */
+ skb_put(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ eth_hdr(skb)->h_proto = proto;
+ memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+
+ /* add datagram */
+ memcpy(skb_put(skb, len), buf, len);
+
+ /* map MBIM session to VLAN */
+ if (tci)
+ vlan_put_tag(skb, tci);
+err:
+ return skb;
+}
+
+static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+ struct sk_buff *skb;
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+ int len;
+ int nframes;
+ int x;
+ int offset;
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ int ndpoffset;
+ int loopcount = 50; /* arbitrary max preventing infinite loop */
+ u8 *c;
+ u16 tci;
+
+ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ if (ndpoffset < 0)
+ goto error;
+
+next_ndp:
+ nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
+
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+
+ switch (ndp16->dwSignature & cpu_to_le32(0x00ffffff)) {
+ case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
+ c = (u8 *)&ndp16->dwSignature;
+ tci = c[3];
+ break;
+ case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
+ c = (u8 *)&ndp16->dwSignature;
+ tci = c[3] + 256;
+ break;
+ default:
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NDP signature <0x%08x>\n",
+ le32_to_cpu(ndp16->dwSignature));
+ goto err_ndp;
+
+ }
+
+ dpe16 = ndp16->dpe16;
+ for (x = 0; x < nframes; x++, dpe16++) {
+ offset = le16_to_cpu(dpe16->wDatagramIndex);
+ len = le16_to_cpu(dpe16->wDatagramLength);
+
+ /*
+ * CDC NCM ch. 3.7
+ * All entries after first NULL entry are to be ignored
+ */
+ if ((offset == 0) || (len == 0)) {
+ if (!x)
+ goto err_ndp; /* empty NTB */
+ break;
+ }
+
+ /* sanity checking */
+ if (((offset + len) > skb_in->len) || (len > ctx->rx_max)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, len, skb_in);
+ if (!x)
+ goto err_ndp;
+ break;
+ } else {
+ skb = cdc_mbim_process_dgram(dev, skb_in->data + offset, len, tci);
+ if (!skb)
+ goto error;
+ usbnet_skb_return(dev, skb);
+ }
+ }
+err_ndp:
+ /* are there more NDPs to process? */
+ ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ if (ndpoffset && loopcount--)
+ goto next_ndp;
+
+ return 1;
+error:
+ return 0;
+}
+
+static int cdc_mbim_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ int ret = 0;
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+
+ if (ctx == NULL) {
+ ret = -1;
+ goto error;
+ }
+
+ ret = usbnet_suspend(intf, message);
+ if (ret < 0)
+ goto error;
+
+ if (intf == ctx->control && info->subdriver && info->subdriver->suspend)
+ ret = info->subdriver->suspend(intf, message);
+ if (ret < 0)
+ usbnet_resume(intf);
+
+error:
+ return ret;
+}
+
+static int cdc_mbim_resume(struct usb_interface *intf)
+{
+ int ret = 0;
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+ bool callsub = (intf == ctx->control && info->subdriver && info->subdriver->resume);
+
+ if (callsub)
+ ret = info->subdriver->resume(intf);
+ if (ret < 0)
+ goto err;
+ ret = usbnet_resume(intf);
+ if (ret < 0 && callsub && info->subdriver->suspend)
+ info->subdriver->suspend(intf, PMSG_SUSPEND);
+err:
+ return ret;
+}
+
+static const struct driver_info cdc_mbim_info = {
+ .description = "CDC MBIM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+ .rx_fixup = cdc_mbim_rx_fixup,
+ .tx_fixup = cdc_mbim_tx_fixup,
+};
+
+static const struct usb_device_id mbim_devs[] = {
+ /* This duplicate NCM entry is intentional. MBIM devices can
+ * be disguised as NCM by default, and this is necessary to
+ * allow us to bind the correct driver_info to such devices.
+ *
+ * bind() will sort this out for us, selecting the correct
+ * entry and rejecting the other
+ */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info,
+ },
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info,
+ },
+ {
+ },
+};
+MODULE_DEVICE_TABLE(usb, mbim_devs);
+
+static struct usb_driver cdc_mbim_driver = {
+ .name = "cdc_mbim",
+ .id_table = mbim_devs,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = cdc_mbim_suspend,
+ .resume = cdc_mbim_resume,
+ .reset_resume = cdc_mbim_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(cdc_mbim_driver);
+
+MODULE_AUTHOR("Greg Suarez <gsuarez@smithmicro.com>");
+MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
+MODULE_DESCRIPTION("USB CDC MBIM host driver");
+MODULE_LICENSE("GPL");
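[Editor's note] The VLAN-to-session mapping in cdc_mbim_tx_fixup() above is the heart of the multiplexing scheme: untagged traffic and VLANs 1-255 select IPS sessions, 256-511 select DSS sessions, and higher VLAN IDs are rejected. Written out as a stand-alone helper (purely illustrative, not part of the patch), the NDP signature selection looks like this:

#include <linux/kernel.h>
#include <linux/usb/cdc.h>

/* hypothetical helper mirroring the switch in cdc_mbim_tx_fixup() */
static int example_tci_to_ndp_sign(u16 tci, __le32 *sign)
{
	if (tci <= 255)			/* IPS session <tci> */
		*sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	else if (tci <= 511)		/* DSS session <tci - 256> */
		*sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
	else
		return -EINVAL;		/* VLAN 512-4095: unsupported */

	/* the session id rides in the last byte of the signature */
	((u8 *)sign)[3] = tci & 0xff;
	return 0;
}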
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 74fab1a..d38bc20 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -51,90 +51,10 @@
#include <linux/atomic.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
+#include <linux/usb/cdc_ncm.h>
#define DRIVER_VERSION "14-Mar-2012"
-/* CDC NCM subclass 3.2.1 */
-#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
-
-/* Maximum NTB length */
-#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
-#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
-
-/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
-#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
-
-#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
-
-/* Default value for MaxDatagramSize */
-#define CDC_NCM_MAX_DATAGRAM_SIZE 8192 /* bytes */
-
-/*
- * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
- * the last NULL entry.
- */
-#define CDC_NCM_DPT_DATAGRAMS_MAX 40
-
-/* Restart the timer, if amount of datagrams is less than given value */
-#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
-#define CDC_NCM_TIMER_PENDING_CNT 2
-#define CDC_NCM_TIMER_INTERVAL (400UL * NSEC_PER_USEC)
-
-/* The following macro defines the minimum header space */
-#define CDC_NCM_MIN_HDR_SIZE \
- (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
- (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
-
-struct cdc_ncm_data {
- struct usb_cdc_ncm_nth16 nth16;
- struct usb_cdc_ncm_ndp16 ndp16;
- struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
-};
-
-struct cdc_ncm_ctx {
- struct cdc_ncm_data tx_ncm;
- struct usb_cdc_ncm_ntb_parameters ncm_parm;
- struct hrtimer tx_timer;
- struct tasklet_struct bh;
-
- const struct usb_cdc_ncm_desc *func_desc;
- const struct usb_cdc_header_desc *header_desc;
- const struct usb_cdc_union_desc *union_desc;
- const struct usb_cdc_ether_desc *ether_desc;
-
- struct net_device *netdev;
- struct usb_device *udev;
- struct usb_host_endpoint *in_ep;
- struct usb_host_endpoint *out_ep;
- struct usb_host_endpoint *status_ep;
- struct usb_interface *intf;
- struct usb_interface *control;
- struct usb_interface *data;
-
- struct sk_buff *tx_curr_skb;
- struct sk_buff *tx_rem_skb;
-
- spinlock_t mtx;
- atomic_t stop;
-
- u32 tx_timer_pending;
- u32 tx_curr_offset;
- u32 tx_curr_last_offset;
- u32 tx_curr_frame_num;
- u32 rx_speed;
- u32 tx_speed;
- u32 rx_max;
- u32 tx_max;
- u32 max_datagram_size;
- u16 tx_max_datagrams;
- u16 tx_remainder;
- u16 tx_modulus;
- u16 tx_ndp_modulus;
- u16 tx_seq;
- u16 rx_seq;
- u16 connected;
-};
-
static void cdc_ncm_txpath_bh(unsigned long param);
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
@@ -158,17 +78,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
u8 flags;
u8 iface_no;
int err;
+ int eth_hlen;
u16 ntb_fmt_supported;
+ u32 min_dgram_size;
+ u32 min_hdr_size;
+ struct usbnet *dev = netdev_priv(ctx->netdev);
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
- err = usb_control_msg(ctx->udev,
- usb_rcvctrlpipe(ctx->udev, 0),
- USB_CDC_GET_NTB_PARAMETERS,
- USB_TYPE_CLASS | USB_DIR_IN
- | USB_RECIP_INTERFACE,
- 0, iface_no, &ctx->ncm_parm,
- sizeof(ctx->ncm_parm), 10000);
+ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
+ USB_TYPE_CLASS | USB_DIR_IN
+ |USB_RECIP_INTERFACE,
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm));
if (err < 0) {
pr_debug("failed GET_NTB_PARAMETERS\n");
return 1;
@@ -184,10 +106,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
- if (ctx->func_desc != NULL)
+ eth_hlen = ETH_HLEN;
+ min_dgram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+ min_hdr_size = CDC_NCM_MIN_HDR_SIZE;
+ if (ctx->mbim_desc != NULL) {
+ flags = ctx->mbim_desc->bmNetworkCapabilities;
+ eth_hlen = 0;
+ min_dgram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
+ min_hdr_size = 0;
+ } else if (ctx->func_desc != NULL) {
flags = ctx->func_desc->bmNetworkCapabilities;
- else
+ } else {
flags = 0;
+ }
pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
"wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
@@ -215,49 +146,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* inform device about NTB input size changes */
if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+ __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
- if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
- struct usb_cdc_ncm_ndp_input_size *ndp_in_sz;
-
- ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL);
- if (!ndp_in_sz) {
- err = -ENOMEM;
- goto size_err;
- }
-
- err = usb_control_msg(ctx->udev,
- usb_sndctrlpipe(ctx->udev, 0),
- USB_CDC_SET_NTB_INPUT_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0, iface_no, ndp_in_sz, 8, 1000);
- kfree(ndp_in_sz);
- } else {
- __le32 *dwNtbInMaxSize;
- dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
- GFP_KERNEL);
- if (!dwNtbInMaxSize) {
- err = -ENOMEM;
- goto size_err;
- }
- *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
-
- err = usb_control_msg(ctx->udev,
- usb_sndctrlpipe(ctx->udev, 0),
- USB_CDC_SET_NTB_INPUT_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0, iface_no, dwNtbInMaxSize, 4, 1000);
- kfree(dwNtbInMaxSize);
- }
-size_err:
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &dwNtbInMaxSize, 4);
if (err < 0)
pr_debug("Setting NTB Input Size failed\n");
}
/* verify maximum size of transmitted NTB in bytes */
if ((ctx->tx_max <
- (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+ (min_hdr_size + min_dgram_size)) ||
(ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
pr_debug("Using default maximum transmit length=%d\n",
CDC_NCM_NTB_MAX_SIZE_TX);
@@ -299,93 +200,85 @@ size_err:
}
/* adjust TX-remainder according to NCM specification. */
- ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
- (ctx->tx_modulus - 1));
+ ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) &
+ (ctx->tx_modulus - 1));
/* additional configuration */
/* set CRC Mode */
if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
- err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
- USB_CDC_SET_CRC_MODE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_CRC_NOT_APPENDED,
- iface_no, NULL, 0, 1000);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_CRC_NOT_APPENDED,
+ iface_no, NULL, 0);
if (err < 0)
pr_debug("Setting CRC mode off failed\n");
}
/* set NTB format, if both formats are supported */
if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
- err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
- USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS
- | USB_DIR_OUT | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0, 1000);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
if (err < 0)
pr_debug("Setting NTB format to 16-bit failed\n");
}
- ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+ ctx->max_datagram_size = min_dgram_size;
/* set Max Datagram Size (MTU) */
if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
- __le16 *max_datagram_size;
- u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-
- max_datagram_size = kzalloc(sizeof(*max_datagram_size),
- GFP_KERNEL);
- if (!max_datagram_size) {
- err = -ENOMEM;
+ __le16 max_datagram_size;
+ u16 eth_max_sz;
+ if (ctx->ether_desc != NULL)
+ eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+ else if (ctx->mbim_desc != NULL)
+ eth_max_sz = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+ else
goto max_dgram_err;
- }
- err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
- USB_CDC_GET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_IN
- | USB_RECIP_INTERFACE,
- 0, iface_no, max_datagram_size,
- 2, 1000);
+ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
if (err < 0) {
pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
- CDC_NCM_MIN_DATAGRAM_SIZE);
+ min_dgram_size);
} else {
ctx->max_datagram_size =
- le16_to_cpu(*max_datagram_size);
+ le16_to_cpu(max_datagram_size);
/* Check Eth descriptor value */
if (ctx->max_datagram_size > eth_max_sz)
ctx->max_datagram_size = eth_max_sz;
if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size =
- CDC_NCM_MAX_DATAGRAM_SIZE;
+ ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
- if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
- ctx->max_datagram_size =
- CDC_NCM_MIN_DATAGRAM_SIZE;
+ if (ctx->max_datagram_size < min_dgram_size)
+ ctx->max_datagram_size = min_dgram_size;
/* if value changed, update device */
if (ctx->max_datagram_size !=
- le16_to_cpu(*max_datagram_size)) {
- err = usb_control_msg(ctx->udev,
- usb_sndctrlpipe(ctx->udev, 0),
+ le16_to_cpu(max_datagram_size)) {
+ err = usbnet_write_cmd(dev,
USB_CDC_SET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_OUT
| USB_RECIP_INTERFACE,
0,
- iface_no, max_datagram_size,
- 2, 1000);
+ iface_no, &max_datagram_size,
+ 2);
if (err < 0)
pr_debug("SET_MAX_DGRAM_SIZE failed\n");
}
}
- kfree(max_datagram_size);
}
max_dgram_err:
- if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
- ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
+ if (ctx->netdev->mtu != (ctx->max_datagram_size - eth_hlen))
+ ctx->netdev->mtu = ctx->max_datagram_size - eth_hlen;
return 0;
}
@@ -451,7 +344,7 @@ static const struct ethtool_ops cdc_ncm_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
{
struct cdc_ncm_ctx *ctx;
struct usb_driver *driver;
@@ -525,6 +418,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
break;
+ case USB_CDC_MBIM_TYPE:
+ if (buf[0] < sizeof(*(ctx->mbim_desc)))
+ break;
+
+ ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf;
+ break;
+
default:
break;
}
@@ -537,7 +437,7 @@ advance:
/* check if we got everything */
if ((ctx->control == NULL) || (ctx->data == NULL) ||
- (ctx->ether_desc == NULL) || (ctx->control != intf))
+ ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
goto error;
/* claim data interface, if different from control */
@@ -559,7 +459,7 @@ advance:
goto error2;
/* configure data interface */
- temp = usb_set_interface(dev->udev, iface_no, 1);
+ temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp)
goto error2;
@@ -576,11 +476,13 @@ advance:
usb_set_intfdata(ctx->control, dev);
usb_set_intfdata(ctx->intf, dev);
- temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
- if (temp)
- goto error2;
+ if (ctx->ether_desc) {
+ temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
+ if (temp)
+ goto error2;
+ dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
+ }
- dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
dev->in = usb_rcvbulkpipe(dev->udev,
ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
@@ -589,13 +491,6 @@ advance:
dev->status = ctx->status_ep;
dev->rx_urb_size = ctx->rx_max;
- /*
- * We should get an event when network connection is "connected" or
- * "disconnected". Set network connection in "disconnected" state
- * (carrier is OFF) during attach, so the IP network stack does not
- * start IPv6 negotiation and more.
- */
- netif_carrier_off(dev->net);
ctx->tx_speed = ctx->rx_speed = 0;
return 0;
@@ -609,8 +504,9 @@ error:
dev_info(&dev->udev->dev, "bind() failure\n");
return -ENODEV;
}
+EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
-static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
+void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
struct usb_driver *driver = driver_of(intf);
@@ -644,52 +540,121 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
usb_set_intfdata(ctx->intf, NULL);
cdc_ncm_free(ctx);
}
+EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
-static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
+static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
- if (first >= max)
- return;
- if (first >= end)
- return;
- if (end > max)
- end = max;
- memset(ptr + first, 0, end - first);
+ int ret;
+
+ /* The MBIM spec defines an NCM-compatible default altsetting,
+ * which we may have matched:
+ *
+ * "Functions that implement both NCM 1.0 and MBIM (an
+ * “NCM/MBIM function”) according to this recommendation
+ * shall provide two alternate settings for the
+ * Communication Interface. Alternate setting 0, and the
+ * associated class and endpoint descriptors, shall be
+ * constructed according to the rules given for the
+ * Communication Interface in section 5 of [USBNCM10].
+ * Alternate setting 1, and the associated class and
+ * endpoint descriptors, shall be constructed according to
+ * the rules given in section 6 (USB Device Model) of this
+ * specification."
+ *
+ * Do not bind to such interfaces, allowing cdc_mbim to handle
+ * them
+ */
+#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
+ if ((intf->num_altsetting == 2) &&
+ !usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ CDC_NCM_COMM_ALTSETTING_MBIM) &&
+ cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ return -ENODEV;
+#endif
+
+ /* NCM data altsetting is always 1 */
+ ret = cdc_ncm_bind_common(dev, intf, 1);
+
+ /*
+ * We should get an event when network connection is "connected" or
+ * "disconnected". Set network connection in "disconnected" state
+ * (carrier is OFF) during attach, so the IP network stack does not
+ * start IPv6 negotiation and more.
+ */
+ netif_carrier_off(dev->net);
+ return ret;
}
-static struct sk_buff *
-cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
+static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
{
+ size_t align = ALIGN(skb->len, modulus) - skb->len + remainder;
+
+ if (skb->len + align > max)
+ align = max - skb->len;
+ if (align && skb_tailroom(skb) >= align)
+ memset(skb_put(skb, align), 0, align);
+}
+
+/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
+ * allocating a new one within skb
+ */
+static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+{
+ struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
+ struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
+ size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
+
+ /* follow the chain of NDPs, looking for a match */
+ while (ndpoffset) {
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
+ if (ndp16->dwSignature == sign)
+ return ndp16;
+ ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ }
+
+ /* align new NDP */
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+
+ /* verify that there is room for the NDP and the datagram (reserve) */
+ if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE)
+ return NULL;
+
+ /* link to it */
+ if (ndp16)
+ ndp16->wNextNdpIndex = cpu_to_le16(skb->len);
+ else
+ nth16->wNdpIndex = cpu_to_le16(skb->len);
+
+ /* push a new empty NDP */
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE);
+ ndp16->dwSignature = sign;
+ ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
+ return ndp16;
+}
+
+struct sk_buff *
+cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
+{
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_ndp16 *ndp16;
struct sk_buff *skb_out;
- u32 rem;
- u32 offset;
- u32 last_offset;
- u16 n = 0, index;
+ u16 n = 0, index, ndplen;
u8 ready2send = 0;
/* if there is a remaining skb, it gets priority */
- if (skb != NULL)
+ if (skb != NULL) {
swap(skb, ctx->tx_rem_skb);
- else
+ swap(sign, ctx->tx_rem_sign);
+ } else {
ready2send = 1;
-
- /*
- * +----------------+
- * | skb_out |
- * +----------------+
- * ^ offset
- * ^ last_offset
- */
+ }
/* check if we are resuming an OUT skb */
- if (ctx->tx_curr_skb != NULL) {
- /* pop variables */
- skb_out = ctx->tx_curr_skb;
- offset = ctx->tx_curr_offset;
- last_offset = ctx->tx_curr_last_offset;
- n = ctx->tx_curr_frame_num;
+ skb_out = ctx->tx_curr_skb;
- } else {
- /* reset variables */
+ /* allocate a new OUT skb */
+ if (!skb_out) {
skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
if (skb_out == NULL) {
if (skb != NULL) {
@@ -698,35 +663,21 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
}
goto exit_no_skb;
}
+ /* fill out the initial 16-bit NTB header */
+ nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out, sizeof(struct usb_cdc_ncm_nth16)), 0, sizeof(struct usb_cdc_ncm_nth16));
+ nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
- /* make room for NTH and NDP */
- offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
- ctx->tx_ndp_modulus) +
- sizeof(struct usb_cdc_ncm_ndp16) +
- (ctx->tx_max_datagrams + 1) *
- sizeof(struct usb_cdc_ncm_dpe16);
-
- /* store last valid offset before alignment */
- last_offset = offset;
- /* align first Datagram offset correctly */
- offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
- /* zero buffer till the first IP datagram */
- cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
- n = 0;
+ /* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
}
- for (; n < ctx->tx_max_datagrams; n++) {
- /* check if end of transmit buffer is reached */
- if (offset >= ctx->tx_max) {
- ready2send = 1;
- break;
- }
- /* compute maximum buffer size */
- rem = ctx->tx_max - offset;
-
+ for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
+ /* send any remaining skb first */
if (skb == NULL) {
skb = ctx->tx_rem_skb;
+ sign = ctx->tx_rem_sign;
ctx->tx_rem_skb = NULL;
/* check for end of skb */
@@ -734,7 +685,14 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
break;
}
- if (skb->len > rem) {
+ /* get the appropriate NDP for this skb */
+ ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+
+ /* align beginning of next frame */
+ cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
+
+ /* check if we had enough room left for both NDP and frame */
+ if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -747,31 +705,30 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
ctx->netdev->stats.tx_dropped++;
}
ctx->tx_rem_skb = skb;
+ ctx->tx_rem_sign = sign;
skb = NULL;
ready2send = 1;
}
break;
}
- memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
-
- ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
- ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
-
- /* update offset */
- offset += skb->len;
-
- /* store last valid offset before alignment */
- last_offset = offset;
-
- /* align offset correctly */
- offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+ /* calculate frame number within this NDP */
+ ndplen = le16_to_cpu(ndp16->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
- /* zero padding */
- cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
- ctx->tx_max);
+ /* OK, add this skb */
+ ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
+ ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
+ ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
dev_kfree_skb_any(skb);
skb = NULL;
+
+ /* send now if this NDP is full */
+ if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
+ ready2send = 1;
+ break;
+ }
}
/* free up any dangling skb */
@@ -787,16 +744,12 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
- ctx->tx_curr_offset = offset;
- ctx->tx_curr_last_offset = last_offset;
goto exit_no_skb;
} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
- ctx->tx_curr_offset = offset;
- ctx->tx_curr_last_offset = last_offset;
/* set the pending count */
if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
@@ -807,75 +760,24 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
/* variables will be reset at next call */
}
- /* check for overflow */
- if (last_offset > ctx->tx_max)
- last_offset = ctx->tx_max;
-
- /* revert offset */
- offset = last_offset;
-
/*
* If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
* we send buffers as it is. If we get more data, it would be more
* efficient for USB HS mobile device with DMA engine to receive a full
* size NTB, than canceling DMA transfer and receiving a short packet.
*/
- if (offset > CDC_NCM_MIN_TX_PKT)
- offset = ctx->tx_max;
-
- /* final zero padding */
- cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
-
- /* store last offset */
- last_offset = offset;
-
- if (((last_offset < ctx->tx_max) && ((last_offset %
- le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
- (((last_offset == ctx->tx_max) && ((ctx->tx_max %
- le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
- (ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
- /* force short packet */
- *(((u8 *)skb_out->data) + last_offset) = 0;
- last_offset++;
- }
-
- /* zero the rest of the DPEs plus the last NULL entry */
- for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
- ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
- ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
- }
+ if (skb_out->len > CDC_NCM_MIN_TX_PKT)
+ /* final zero padding */
+ memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len);
- /* fill out 16-bit NTB header */
- ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
- ctx->tx_ncm.nth16.wHeaderLength =
- cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
- ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
- ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
- index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
- ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
-
- memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
- ctx->tx_seq++;
-
- /* fill out 16-bit NDP table */
- ctx->tx_ncm.ndp16.dwSignature =
- cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
- rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
- sizeof(struct usb_cdc_ncm_dpe16));
- ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
- ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
-
- memcpy(((u8 *)skb_out->data) + index,
- &(ctx->tx_ncm.ndp16),
- sizeof(ctx->tx_ncm.ndp16));
+ /* do we need to prevent a ZLP? */
+ if (((skb_out->len % le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
+ (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) && skb_tailroom(skb_out))
+ *skb_put(skb_out, 1) = 0; /* force short packet */
- memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
- &(ctx->tx_ncm.dpe16),
- (ctx->tx_curr_frame_num + 1) *
- sizeof(struct usb_cdc_ncm_dpe16));
-
- /* set frame length */
- skb_put(skb_out, last_offset);
+ /* set final frame length */
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ nth16->wBlockLength = cpu_to_le16(skb_out->len);
/* return skb */
ctx->tx_curr_skb = NULL;
@@ -888,6 +790,7 @@ exit_no_skb:
cdc_ncm_tx_timeout_start(ctx);
return NULL;
}
+EXPORT_SYMBOL_GPL(cdc_ncm_fill_tx_frame);
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
{
@@ -922,6 +825,8 @@ static void cdc_ncm_txpath_bh(unsigned long param)
netif_tx_lock_bh(ctx->netdev);
usbnet_start_xmit(NULL, ctx->netdev);
netif_tx_unlock_bh(ctx->netdev);
+ } else {
+ spin_unlock_bh(&ctx->mtx);
}
}
@@ -942,7 +847,7 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
+ skb_out = cdc_ncm_fill_tx_frame(ctx, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -953,17 +858,12 @@ error:
return NULL;
}
-static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+/* verify NTB header and return offset of first NDP, or negative error */
+int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
{
- struct sk_buff *skb;
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int len;
- int nframes;
- int x;
- int offset;
struct usb_cdc_ncm_nth16 *nth16;
- struct usb_cdc_ncm_ndp16 *ndp16;
- struct usb_cdc_ncm_dpe16 *dpe16;
+ int len;
+ int ret = -EINVAL;
if (ctx == NULL)
goto error;
@@ -997,20 +897,23 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
}
ctx->rx_seq = le16_to_cpu(nth16->wSequence);
- len = le16_to_cpu(nth16->wNdpIndex);
- if ((len + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
- pr_debug("invalid DPT16 index <%u>\n",
- le16_to_cpu(nth16->wNdpIndex));
- goto error;
- }
+ ret = le16_to_cpu(nth16->wNdpIndex);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
- ndp16 = (struct usb_cdc_ncm_ndp16 *)(((u8 *)skb_in->data) + len);
+/* verify NDP header and return number of datagrams, or negative error */
+int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
+{
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ int ret = -EINVAL;
- if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
- pr_debug("invalid DPT16 signature <%u>\n",
- le32_to_cpu(ndp16->dwSignature));
+ if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
+ pr_debug("invalid NDP offset <%u>\n", ndpoffset);
goto error;
}
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
pr_debug("invalid DPT16 length <%u>\n",
@@ -1018,20 +921,52 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
goto error;
}
- nframes = ((le16_to_cpu(ndp16->wLength) -
+ ret = ((le16_to_cpu(ndp16->wLength) -
sizeof(struct usb_cdc_ncm_ndp16)) /
sizeof(struct usb_cdc_ncm_dpe16));
- nframes--; /* we process NDP entries except for the last one */
-
- len += sizeof(struct usb_cdc_ncm_ndp16);
+ ret--; /* we process NDP entries except for the last one */
- if ((len + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) >
+ if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) >
skb_in->len) {
- pr_debug("Invalid nframes = %d\n", nframes);
- goto error;
+ pr_debug("Invalid nframes = %d\n", ret);
+ ret = -EINVAL;
}
- dpe16 = (struct usb_cdc_ncm_dpe16 *)(((u8 *)skb_in->data) + len);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
+
+static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+ struct sk_buff *skb;
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ int len;
+ int nframes;
+ int x;
+ int offset;
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ int ndpoffset;
+ int loopcount = 50; /* arbitrary max preventing infinite loop */
+
+ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ if (ndpoffset < 0)
+ goto error;
+
+next_ndp:
+ nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
+
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+
+ if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
+ pr_debug("invalid DPT16 signature <%u>\n",
+ le32_to_cpu(ndp16->dwSignature));
+ goto err_ndp;
+ }
+ dpe16 = ndp16->dpe16;
for (x = 0; x < nframes; x++, dpe16++) {
offset = le16_to_cpu(dpe16->wDatagramIndex);
@@ -1043,7 +978,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
*/
if ((offset == 0) || (len == 0)) {
if (!x)
- goto error; /* empty NTB */
+ goto err_ndp; /* empty NTB */
break;
}
@@ -1054,7 +989,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
"offset[%u]=%u, length=%u, skb=%p\n",
x, offset, len, skb_in);
if (!x)
- goto error;
+ goto err_ndp;
break;
} else {
@@ -1067,6 +1002,12 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
usbnet_skb_return(dev, skb);
}
}
+err_ndp:
+ /* are there more NDPs to process? */
+ ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ if (ndpoffset && loopcount--)
+ goto next_ndp;
+
return 1;
error:
return 0;
@@ -1131,7 +1072,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- ctx->connected = event->wValue;
+ ctx->connected = le16_to_cpu(event->wValue);
printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
" %sconnected\n",
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index e0433ce..3f554c1 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -56,27 +56,12 @@
static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
{
- void *buf;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "dm_read() reg=0x%02x length=%d\n", reg, length);
-
- buf = kmalloc(length, GFP_KERNEL);
- if (!buf)
- goto out;
-
- err = usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- DM_READ_REGS,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
- if (err == length)
- memcpy(data, buf, length);
- else if (err >= 0)
+ int err;
+ err = usbnet_read_cmd(dev, DM_READ_REGS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, data, length);
+ if (err != length && err >= 0)
err = -EINVAL;
- kfree(buf);
-
- out:
return err;
}
@@ -87,91 +72,29 @@ static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
{
- void *buf = NULL;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
+ int err;
+ err = usbnet_write_cmd(dev, DM_WRITE_REGS,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, data, length);
- if (data) {
- buf = kmemdup(data, length, GFP_KERNEL);
- if (!buf)
- goto out;
- }
-
- err = usb_control_msg(dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- DM_WRITE_REGS,
- USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
- 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
- kfree(buf);
if (err >= 0 && err < length)
err = -EINVAL;
- out:
return err;
}
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- netdev_dbg(dev->net, "dm_write_reg() reg=0x%02x, value=0x%02x\n",
- reg, value);
- return usb_control_msg(dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- DM_WRITE_REG,
- USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
- value, reg, NULL, 0, USB_CTRL_SET_TIMEOUT);
-}
-
-static void dm_write_async_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n",
- status);
-
- kfree(req);
- usb_free_urb(urb);
+ return usbnet_write_cmd(dev, DM_WRITE_REGS,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, reg, NULL, 0);
}
static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
u16 length, void *data)
{
- struct usb_ctrlrequest *req;
- struct urb *urb;
- int status;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(dev->net, "Error allocating URB in dm_write_async_helper!\n");
- return;
- }
-
- req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!req) {
- netdev_err(dev->net, "Failed to allocate memory for control request\n");
- usb_free_urb(urb);
- return;
- }
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- req->bRequest = length ? DM_WRITE_REGS : DM_WRITE_REG;
- req->wValue = cpu_to_le16(value);
- req->wIndex = cpu_to_le16(reg);
- req->wLength = cpu_to_le16(length);
-
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (void *)req, data, length,
- dm_write_async_callback, req);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_err(dev->net, "Error submitting the control message: status=%d\n",
- status);
- kfree(req);
- usb_free_urb(urb);
- }
+ usbnet_write_cmd_async(dev, DM_WRITE_REGS,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, reg, data, length);
}
static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 8de6417..ace9e74 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -116,23 +116,8 @@ static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
return skb;
}
-static void int51x1_async_cmd_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- dev_warn(&urb->dev->dev, "async callback failed with %d\n", status);
-
- kfree(req);
- usb_free_urb(urb);
-}
-
static void int51x1_set_multicast(struct net_device *netdev)
{
- struct usb_ctrlrequest *req;
- int status;
- struct urb *urb;
struct usbnet *dev = netdev_priv(netdev);
u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST;
@@ -149,40 +134,9 @@ static void int51x1_set_multicast(struct net_device *netdev)
netdev_dbg(dev->net, "receive own packets only\n");
}
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_warn(dev->net, "Error allocating URB\n");
- return;
- }
-
- req = kmalloc(sizeof(*req), GFP_ATOMIC);
- if (!req) {
- netdev_warn(dev->net, "Error allocating control msg\n");
- goto out;
- }
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
- req->bRequest = SET_ETHERNET_PACKET_FILTER;
- req->wValue = cpu_to_le16(filter);
- req->wIndex = 0;
- req->wLength = 0;
-
- usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
- (void *)req, NULL, 0,
- int51x1_async_cmd_callback,
- (void *)req);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
- status);
- goto out1;
- }
- return;
-out1:
- kfree(req);
-out:
- usb_free_urb(urb);
+ usbnet_write_cmd_async(dev, SET_ETHERNET_PACKET_FILTER,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ filter, 0, NULL, 0);
}
static const struct net_device_ops int51x1_netdev_ops = {
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index cc7e720..3f3f566 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -124,93 +124,20 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver";
static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
{
- struct usb_device *xdev = dev->udev;
- int ret;
- void *buffer;
-
- buffer = kmalloc(size, GFP_NOIO);
- if (buffer == NULL)
- return -ENOMEM;
-
- ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
- MCS7830_RD_BMREQ, 0x0000, index, buffer,
- size, MCS7830_CTRL_TIMEOUT);
- memcpy(data, buffer, size);
- kfree(buffer);
-
- return ret;
+ return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ,
+ 0x0000, index, data, size);
}
static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
{
- struct usb_device *xdev = dev->udev;
- int ret;
- void *buffer;
-
- buffer = kmemdup(data, size, GFP_NOIO);
- if (buffer == NULL)
- return -ENOMEM;
-
- ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
- MCS7830_WR_BMREQ, 0x0000, index, buffer,
- size, MCS7830_CTRL_TIMEOUT);
- kfree(buffer);
- return ret;
-}
-
-static void mcs7830_async_cmd_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- printk(KERN_DEBUG "%s() failed with %d\n",
- __func__, status);
-
- kfree(req);
- usb_free_urb(urb);
+ return usbnet_write_cmd(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ,
+ 0x0000, index, data, size);
}
static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data)
{
- struct usb_ctrlrequest *req;
- int ret;
- struct urb *urb;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- dev_dbg(&dev->udev->dev,
- "Error allocating URB in write_cmd_async!\n");
- return;
- }
-
- req = kmalloc(sizeof *req, GFP_ATOMIC);
- if (!req) {
- dev_err(&dev->udev->dev,
- "Failed to allocate memory for control request\n");
- goto out;
- }
- req->bRequestType = MCS7830_WR_BMREQ;
- req->bRequest = MCS7830_WR_BREQ;
- req->wValue = 0;
- req->wIndex = cpu_to_le16(index);
- req->wLength = cpu_to_le16(size);
-
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (void *)req, data, size,
- mcs7830_async_cmd_callback, req);
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(&dev->udev->dev,
- "Error submitting the control message: ret=%d\n", ret);
- goto out;
- }
- return;
-out:
- kfree(req);
- usb_free_urb(urb);
+ usbnet_write_cmd_async(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ,
+ 0x0000, index, data, size);
}
static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index c062a3e..93e0716 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -109,13 +109,11 @@ struct nc_trailer {
static int
nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr)
{
- int status = usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- req,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, regnum,
- retval_ptr, sizeof *retval_ptr,
- USB_CTRL_GET_TIMEOUT);
+ int status = usbnet_read_cmd(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, regnum, retval_ptr,
+ sizeof *retval_ptr);
if (status > 0)
status = 0;
if (!status)
@@ -133,13 +131,9 @@ nc_register_read(struct usbnet *dev, u8 regnum, u16 *retval_ptr)
static void
nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
{
- usb_control_msg(dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- req,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, regnum,
- NULL, 0, // data is in setup packet
- USB_CTRL_SET_TIMEOUT);
+ usbnet_write_cmd(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, regnum, NULL, 0);
}
static inline void
@@ -288,37 +282,34 @@ static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
static int net1080_reset(struct usbnet *dev)
{
u16 usbctl, status, ttl;
- u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);
+ u16 vp;
int retval;
- if (!vp)
- return -ENOMEM;
-
// nc_dump_registers(dev);
- if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
+ if ((retval = nc_register_read(dev, REG_STATUS, &vp)) < 0) {
netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
dev->udev->bus->bus_name, dev->udev->devpath, retval);
goto done;
}
- status = *vp;
+ status = vp;
nc_dump_status(dev, status);
- if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
+ if ((retval = nc_register_read(dev, REG_USBCTL, &vp)) < 0) {
netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
goto done;
}
- usbctl = *vp;
+ usbctl = vp;
nc_dump_usbctl(dev, usbctl);
nc_register_write(dev, REG_USBCTL,
USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
- if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
+ if ((retval = nc_register_read(dev, REG_TTL, &vp)) < 0) {
netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
goto done;
}
- ttl = *vp;
+ ttl = vp;
// nc_dump_ttl(dev, ttl);
nc_register_write(dev, REG_TTL,
@@ -331,7 +322,6 @@ static int net1080_reset(struct usbnet *dev)
retval = 0;
done:
- kfree(vp);
return retval;
}
@@ -339,13 +329,10 @@ static int net1080_check_connect(struct usbnet *dev)
{
int retval;
u16 status;
- u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);
+ u16 vp;
- if (!vp)
- return -ENOMEM;
- retval = nc_register_read(dev, REG_STATUS, vp);
- status = *vp;
- kfree(vp);
+ retval = nc_register_read(dev, REG_STATUS, &vp);
+ status = vp;
if (retval != 0) {
netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
return retval;
@@ -355,59 +342,22 @@ static int net1080_check_connect(struct usbnet *dev)
return 0;
}
-static void nc_flush_complete(struct urb *urb)
-{
- kfree(urb->context);
- usb_free_urb(urb);
-}
-
static void nc_ensure_sync(struct usbnet *dev)
{
- dev->frame_errors++;
- if (dev->frame_errors > 5) {
- struct urb *urb;
- struct usb_ctrlrequest *req;
- int status;
-
- /* Send a flush */
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return;
-
- req = kmalloc(sizeof *req, GFP_ATOMIC);
- if (!req) {
- usb_free_urb(urb);
- return;
- }
+ if (++dev->frame_errors <= 5)
+ return;
- req->bRequestType = USB_DIR_OUT
- | USB_TYPE_VENDOR
- | USB_RECIP_DEVICE;
- req->bRequest = REQUEST_REGISTER;
- req->wValue = cpu_to_le16(USBCTL_FLUSH_THIS
- | USBCTL_FLUSH_OTHER);
- req->wIndex = cpu_to_le16(REG_USBCTL);
- req->wLength = cpu_to_le16(0);
-
- /* queue an async control request, we don't need
- * to do anything when it finishes except clean up.
- */
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (unsigned char *) req,
- NULL, 0,
- nc_flush_complete, req);
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- kfree(req);
- usb_free_urb(urb);
- return;
- }
+ if (usbnet_write_cmd_async(dev, REQUEST_REGISTER,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ USBCTL_FLUSH_THIS |
+ USBCTL_FLUSH_OTHER,
+ REG_USBCTL, NULL, 0))
+ return;
- netif_dbg(dev, rx_err, dev->net,
- "flush net1080; too many framing errors\n");
- dev->frame_errors = 0;
- }
+ netif_dbg(dev, rx_err, dev->net,
+ "flush net1080; too many framing errors\n");
+ dev->frame_errors = 0;
}
static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 4584b9a..0fcc8e6 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -71,13 +71,10 @@
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
- return usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- req,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- val, index,
- NULL, 0,
- USB_CTRL_GET_TIMEOUT);
+ return usbnet_read_cmd(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ val, index, NULL, 0);
}
static inline int
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index c27d277..18dd425 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -311,10 +311,9 @@ static int sierra_net_send_cmd(struct usbnet *dev,
struct sierra_net_data *priv = sierra_net_get_private(dev);
int status;
- status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_CDC_SEND_ENCAPSULATED_COMMAND,
- USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0,
- priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
+ status = usbnet_write_cmd(dev, USB_CDC_SEND_ENCAPSULATED_COMMAND,
+ USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
+ 0, priv->ifnum, cmd, cmdlen);
if (status != cmdlen && status != -ENODEV)
netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
@@ -340,7 +339,7 @@ static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
priv->tx_hdr_template[0] = 0x3F;
priv->tx_hdr_template[1] = ctx_ix;
- *((u16 *)&priv->tx_hdr_template[2]) =
+ *((__be16 *)&priv->tx_hdr_template[2]) =
cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
}
@@ -632,32 +631,22 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
{
int result = 0;
- u16 *attrdata;
-
- attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
- if (!attrdata)
- return -ENOMEM;
-
- result = usb_control_msg(
- dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- /* _u8 vendor specific request */
- SWI_USB_REQUEST_GET_FW_ATTR,
- USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
- 0x0000, /* __u16 value not used */
- 0x0000, /* __u16 index not used */
- attrdata, /* char *data */
- sizeof(*attrdata), /* __u16 size */
- USB_CTRL_SET_TIMEOUT); /* int timeout */
-
- if (result < 0) {
- kfree(attrdata);
+ __le16 attrdata;
+
+ result = usbnet_read_cmd(dev,
+ /* _u8 vendor specific request */
+ SWI_USB_REQUEST_GET_FW_ATTR,
+ USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
+ 0x0000, /* __u16 value not used */
+ 0x0000, /* __u16 index not used */
+ &attrdata, /* char *data */
+ sizeof(attrdata) /* __u16 size */
+ );
+
+ if (result < 0)
return -EIO;
- }
-
- *datap = le16_to_cpu(*attrdata);
- kfree(attrdata);
+ *datap = le16_to_cpu(attrdata);
return result;
}
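
The rewritten sierra_net_get_fw_attr() keeps the 16-bit attribute in an on-stack __le16 and converts it with le16_to_cpu(); the usbnet_read_cmd() helper added later in this series copies the result through its own kmalloc'd bounce buffer, so the caller no longer needs a per-call allocation. A minimal sketch of the same pattern, assuming a made-up vendor request VENDOR_REQ_GET_STATUS and register index (not from this driver):

	static int example_read_le16(struct usbnet *dev, u16 reg, u16 *out)
	{
		__le16 raw;	/* stack storage is fine: the core bounces via kmalloc() */
		int ret;

		ret = usbnet_read_cmd(dev, VENDOR_REQ_GET_STATUS,
				      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				      0, reg, &raw, sizeof(raw));
		if (ret < 0)
			return ret;
		if (ret != sizeof(raw))
			return -EIO;

		*out = le16_to_cpu(raw);	/* wire format is little endian */
		return 0;
	}
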
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index b77ae76..c5353cf 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -26,6 +26,8 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
@@ -52,7 +54,8 @@
#define USB_PRODUCT_ID_LAN7500 (0x7500)
#define USB_PRODUCT_ID_LAN7505 (0x7505)
#define RXW_PADDING 2
-#define SUPPORTED_WAKE (WAKE_MAGIC)
+#define SUPPORTED_WAKE (WAKE_UCAST | WAKE_BCAST | \
+ WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
#define check_warn(ret, fmt, args...) \
({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -82,71 +85,92 @@ static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
-static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data)
+static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data, int in_pm)
{
- u32 *buf = kmalloc(4, GFP_KERNEL);
+ u32 buf;
int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
BUG_ON(!dev);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- USB_VENDOR_REQUEST_READ_REGISTER,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+ if (!in_pm)
+ fn = usbnet_read_cmd;
+ else
+ fn = usbnet_read_cmd_nopm;
+ ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+ | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, &buf, 4);
if (unlikely(ret < 0))
netdev_warn(dev->net,
"Failed to read reg index 0x%08x: %d", index, ret);
- le32_to_cpus(buf);
- *data = *buf;
- kfree(buf);
+ le32_to_cpus(&buf);
+ *data = buf;
return ret;
}
-static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
- u32 data)
+static int __must_check __smsc75xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data, int in_pm)
{
- u32 *buf = kmalloc(4, GFP_KERNEL);
+ u32 buf;
int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
BUG_ON(!dev);
- if (!buf)
- return -ENOMEM;
-
- *buf = data;
- cpu_to_le32s(buf);
+ if (!in_pm)
+ fn = usbnet_write_cmd;
+ else
+ fn = usbnet_write_cmd_nopm;
- ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_VENDOR_REQUEST_WRITE_REGISTER,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+ buf = data;
+ cpu_to_le32s(&buf);
+ ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
+ | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, &buf, 4);
if (unlikely(ret < 0))
netdev_warn(dev->net,
"Failed to write reg index 0x%08x: %d", index, ret);
- kfree(buf);
-
return ret;
}
+static int __must_check smsc75xx_read_reg_nopm(struct usbnet *dev, u32 index,
+ u32 *data)
+{
+ return __smsc75xx_read_reg(dev, index, data, 1);
+}
+
+static int __must_check smsc75xx_write_reg_nopm(struct usbnet *dev, u32 index,
+ u32 data)
+{
+ return __smsc75xx_write_reg(dev, index, data, 1);
+}
+
+static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
+{
+ return __smsc75xx_read_reg(dev, index, data, 0);
+}
+
+static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
+{
+ return __smsc75xx_write_reg(dev, index, data, 0);
+}
+
static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
{
if (WARN_ON_ONCE(!dev))
return -EINVAL;
- cpu_to_le32s(&feature);
-
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ return usbnet_write_cmd_nopm(dev, USB_REQ_SET_FEATURE,
+ USB_DIR_OUT | USB_RECIP_DEVICE,
+ feature, 0, NULL, 0);
}
static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
@@ -154,11 +178,9 @@ static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
if (WARN_ON_ONCE(!dev))
return -EINVAL;
- cpu_to_le32s(&feature);
-
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ return usbnet_write_cmd_nopm(dev, USB_REQ_CLEAR_FEATURE,
+ USB_DIR_OUT | USB_RECIP_DEVICE,
+ feature, 0, NULL, 0);
}
/* Loop until the read is completed with timeout
@@ -804,13 +826,16 @@ static int smsc75xx_set_features(struct net_device *netdev,
return 0;
}
-static int smsc75xx_wait_ready(struct usbnet *dev)
+static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
{
int timeout = 0;
do {
u32 buf;
- int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ int ret;
+
+ ret = __smsc75xx_read_reg(dev, PMT_CTL, &buf, in_pm);
+
check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
if (buf & PMT_CTL_DEV_RDY)
@@ -832,7 +857,7 @@ static int smsc75xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
- ret = smsc75xx_wait_ready(dev);
+ ret = smsc75xx_wait_ready(dev, 0);
check_warn_return(ret, "device not ready in smsc75xx_reset");
ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
@@ -1154,6 +1179,36 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
}
}
+static u16 smsc_crc(const u8 *buffer, size_t len)
+{
+ return bitrev16(crc16(0xFFFF, buffer, len));
+}
+
+static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg,
+ u32 wuf_mask1)
+{
+ int cfg_base = WUF_CFGX + filter * 4;
+ int mask_base = WUF_MASKX + filter * 16;
+ int ret;
+
+ ret = smsc75xx_write_reg(dev, cfg_base, wuf_cfg);
+ check_warn_return(ret, "Error writing WUF_CFGX");
+
+ ret = smsc75xx_write_reg(dev, mask_base, wuf_mask1);
+ check_warn_return(ret, "Error writing WUF_MASKX");
+
+ ret = smsc75xx_write_reg(dev, mask_base + 4, 0);
+ check_warn_return(ret, "Error writing WUF_MASKX");
+
+ ret = smsc75xx_write_reg(dev, mask_base + 8, 0);
+ check_warn_return(ret, "Error writing WUF_MASKX");
+
+ ret = smsc75xx_write_reg(dev, mask_base + 12, 0);
+ check_warn_return(ret, "Error writing WUF_MASKX");
+
+ return 0;
+}
+
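
smsc_crc() runs the pattern bytes through the kernel's crc16() (the reflected 0x8005 polynomial, LSB first, seeded with 0xFFFF) and then bit-reverses the 16-bit result with bitrev16() before it is OR'd into the low half of WUF_CFGX. A rough userspace model of that calculation, handy for checking what a given wakeup pattern should hash to (everything below is local to the sketch, not driver code):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Models the kernel's crc16(): poly 0x8005 reflected (0xA001), LSB first. */
	static uint16_t crc16_model(uint16_t crc, const uint8_t *buf, size_t len)
	{
		while (len--) {
			crc ^= *buf++;
			for (int i = 0; i < 8; i++)
				crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
		}
		return crc;
	}

	/* Models bitrev16(): reverse the bit order of a 16-bit value. */
	static uint16_t bitrev16_model(uint16_t x)
	{
		uint16_t r = 0;
		for (int i = 0; i < 16; i++)
			r |= ((x >> i) & 1) << (15 - i);
		return r;
	}

	int main(void)
	{
		const uint8_t mcast[] = { 0x01, 0x00, 0x5E };	/* multicast prefix, as above */

		/* Mirrors smsc_crc(): bitrev16(crc16(0xFFFF, buf, len)) */
		printf("WUF CRC for 01:00:5e = 0x%04x\n",
		       bitrev16_model(crc16_model(0xFFFF, mcast, sizeof(mcast))));
		return 0;
	}
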
static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1169,101 +1224,156 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
netdev_info(dev->net, "entering SUSPEND2 mode");
/* disable energy detect (link up) & wake up events */
- ret = smsc75xx_read_reg(dev, WUCSR, &val);
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
check_warn_return(ret, "Error reading WUCSR");
val &= ~(WUCSR_MPEN | WUCSR_WUEN);
- ret = smsc75xx_write_reg(dev, WUCSR, val);
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
check_warn_return(ret, "Error writing WUCSR");
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
check_warn_return(ret, "Error reading PMT_CTL");
val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
check_warn_return(ret, "Error writing PMT_CTL");
/* enter suspend2 mode */
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
check_warn_return(ret, "Error reading PMT_CTL");
val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
val |= PMT_CTL_SUS_MODE_2;
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
check_warn_return(ret, "Error writing PMT_CTL");
return 0;
}
- if (pdata->wolopts & WAKE_MAGIC) {
- /* clear any pending magic packet status */
- ret = smsc75xx_read_reg(dev, WUCSR, &val);
+ if (pdata->wolopts & (WAKE_MCAST | WAKE_ARP)) {
+ int i, filter = 0;
+
+ /* disable all filters */
+ for (i = 0; i < WUF_NUM; i++) {
+ ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0);
+ check_warn_return(ret, "Error writing WUF_CFGX");
+ }
+
+ if (pdata->wolopts & WAKE_MCAST) {
+ const u8 mcast[] = {0x01, 0x00, 0x5E};
+ netdev_info(dev->net, "enabling multicast detection");
+
+ val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST
+ | smsc_crc(mcast, 3);
+ ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007);
+ check_warn_return(ret, "Error writing wakeup filter");
+ }
+
+ if (pdata->wolopts & WAKE_ARP) {
+ const u8 arp[] = {0x08, 0x06};
+ netdev_info(dev->net, "enabling ARP detection");
+
+ val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16)
+ | smsc_crc(arp, 2);
+ ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003);
+ check_warn_return(ret, "Error writing wakeup filter");
+ }
+
+ /* clear any pending pattern match packet status */
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
check_warn_return(ret, "Error reading WUCSR");
- val |= WUCSR_MPR;
+ val |= WUCSR_WUFR;
- ret = smsc75xx_write_reg(dev, WUCSR, val);
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+
+ netdev_info(dev->net, "enabling packet match detection");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val |= WUCSR_WUEN;
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+ } else {
+ netdev_info(dev->net, "disabling packet match detection");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val &= ~WUCSR_WUEN;
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
check_warn_return(ret, "Error writing WUCSR");
}
- /* enable/disable magic packup wake */
- ret = smsc75xx_read_reg(dev, WUCSR, &val);
+ /* disable magic, bcast & unicast wakeup sources */
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
check_warn_return(ret, "Error reading WUCSR");
+ val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN);
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+
if (pdata->wolopts & WAKE_MAGIC) {
netdev_info(dev->net, "enabling magic packet wakeup");
- val |= WUCSR_MPEN;
- } else {
- netdev_info(dev->net, "disabling magic packet wakeup");
- val &= ~WUCSR_MPEN;
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ /* clear any pending magic packet status */
+ val |= WUCSR_MPR | WUCSR_MPEN;
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
}
- ret = smsc75xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
+ if (pdata->wolopts & WAKE_BCAST) {
+ netdev_info(dev->net, "enabling broadcast detection");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
- /* enable wol wakeup source */
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
+ val |= WUCSR_BCAST_FR | WUCSR_BCST_EN;
- val |= PMT_CTL_WOL_EN;
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+ }
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
+ if (pdata->wolopts & WAKE_UCAST) {
+ netdev_info(dev->net, "enabling unicast detection");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val |= WUCSR_WUFR | WUCSR_PFDA_EN;
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+ }
- /* enable receiver */
- ret = smsc75xx_read_reg(dev, MAC_RX, &val);
+ /* enable receiver to enable frame reception */
+ ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val);
check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
val |= MAC_RX_RXEN;
- ret = smsc75xx_write_reg(dev, MAC_RX, val);
+ ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val);
check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
/* some wol options are enabled, so enter SUSPEND0 */
netdev_info(dev->net, "entering SUSPEND0 mode");
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
check_warn_return(ret, "Error reading PMT_CTL");
- val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST));
- val |= PMT_CTL_SUS_MODE_0;
+ val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST));
+ val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS;
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
check_warn_return(ret, "Error writing PMT_CTL");
- /* clear wol status */
- val &= ~PMT_CTL_WUPS;
- val |= PMT_CTL_WUPS_WOL;
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
-
- /* read back PMT_CTL */
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
-
smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
return 0;
@@ -1276,42 +1386,43 @@ static int smsc75xx_resume(struct usb_interface *intf)
int ret;
u32 val;
- if (pdata->wolopts & WAKE_MAGIC) {
+ if (pdata->wolopts) {
netdev_info(dev->net, "resuming from SUSPEND0");
smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
- /* Disable magic packup wake */
- ret = smsc75xx_read_reg(dev, WUCSR, &val);
+ /* Disable wakeup sources */
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
check_warn_return(ret, "Error reading WUCSR");
- val &= ~WUCSR_MPEN;
+ val &= ~(WUCSR_WUEN | WUCSR_MPEN | WUCSR_PFDA_EN
+ | WUCSR_BCST_EN);
- ret = smsc75xx_write_reg(dev, WUCSR, val);
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
check_warn_return(ret, "Error writing WUCSR");
/* clear wake-up status */
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
check_warn_return(ret, "Error reading PMT_CTL");
val &= ~PMT_CTL_WOL_EN;
val |= PMT_CTL_WUPS;
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
check_warn_return(ret, "Error writing PMT_CTL");
} else {
netdev_info(dev->net, "resuming from SUSPEND2");
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
check_warn_return(ret, "Error reading PMT_CTL");
val |= PMT_CTL_PHY_PWRUP;
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
check_warn_return(ret, "Error writing PMT_CTL");
}
- ret = smsc75xx_wait_ready(dev);
+ ret = smsc75xx_wait_ready(dev, 1);
check_warn_return(ret, "device not ready in smsc75xx_resume");
return usbnet_resume(intf);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 362cb8c..e083f53 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -26,6 +26,8 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
@@ -46,7 +48,8 @@
#define SMSC95XX_INTERNAL_PHY_ID (1)
#define SMSC95XX_TX_OVERHEAD (8)
#define SMSC95XX_TX_OVERHEAD_CSUM (12)
-#define SUPPORTED_WAKE (WAKE_MAGIC)
+#define SUPPORTED_WAKE (WAKE_UCAST | WAKE_BCAST | \
+ WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
#define check_warn(ret, fmt, args...) \
({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -63,80 +66,98 @@ struct smsc95xx_priv {
u32 hash_lo;
u32 wolopts;
spinlock_t mac_cr_lock;
-};
-
-struct usb_context {
- struct usb_ctrlrequest req;
- struct usbnet *dev;
+ int wuff_filter_count;
};
static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
-static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data)
+static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data, int in_pm)
{
- u32 *buf = kmalloc(4, GFP_KERNEL);
+ u32 buf;
int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
BUG_ON(!dev);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- USB_VENDOR_REQUEST_READ_REGISTER,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+ if (!in_pm)
+ fn = usbnet_read_cmd;
+ else
+ fn = usbnet_read_cmd_nopm;
+ ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+ | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, &buf, 4);
if (unlikely(ret < 0))
- netdev_warn(dev->net, "Failed to read register index 0x%08x\n", index);
+ netdev_warn(dev->net,
+ "Failed to read reg index 0x%08x: %d", index, ret);
- le32_to_cpus(buf);
- *data = *buf;
- kfree(buf);
+ le32_to_cpus(&buf);
+ *data = buf;
return ret;
}
-static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
- u32 data)
+static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data, int in_pm)
{
- u32 *buf = kmalloc(4, GFP_KERNEL);
+ u32 buf;
int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
BUG_ON(!dev);
- if (!buf)
- return -ENOMEM;
-
- *buf = data;
- cpu_to_le32s(buf);
+ if (!in_pm)
+ fn = usbnet_write_cmd;
+ else
+ fn = usbnet_write_cmd_nopm;
- ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_VENDOR_REQUEST_WRITE_REGISTER,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+ buf = data;
+ cpu_to_le32s(&buf);
+ ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
+ | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, &buf, 4);
if (unlikely(ret < 0))
- netdev_warn(dev->net, "Failed to write register index 0x%08x\n", index);
-
- kfree(buf);
+ netdev_warn(dev->net,
+ "Failed to write reg index 0x%08x: %d", index, ret);
return ret;
}
+static int __must_check smsc95xx_read_reg_nopm(struct usbnet *dev, u32 index,
+ u32 *data)
+{
+ return __smsc95xx_read_reg(dev, index, data, 1);
+}
+
+static int __must_check smsc95xx_write_reg_nopm(struct usbnet *dev, u32 index,
+ u32 data)
+{
+ return __smsc95xx_write_reg(dev, index, data, 1);
+}
+
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
+{
+ return __smsc95xx_read_reg(dev, index, data, 0);
+}
+
+static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
+{
+ return __smsc95xx_write_reg(dev, index, data, 0);
+}
static int smsc95xx_set_feature(struct usbnet *dev, u32 feature)
{
if (WARN_ON_ONCE(!dev))
return -EINVAL;
- cpu_to_le32s(&feature);
-
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ return usbnet_write_cmd_nopm(dev, USB_REQ_SET_FEATURE,
+ USB_RECIP_DEVICE, feature, 0,
+ NULL, 0);
}
static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
@@ -144,11 +165,9 @@ static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
if (WARN_ON_ONCE(!dev))
return -EINVAL;
- cpu_to_le32s(&feature);
-
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ return usbnet_write_cmd_nopm(dev, USB_REQ_CLEAR_FEATURE,
+ USB_RECIP_DEVICE, feature,
+ 0, NULL, 0);
}
/* Loop until the read is completed with timeout
@@ -350,60 +369,20 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
return 0;
}
-static void smsc95xx_async_cmd_callback(struct urb *urb)
-{
- struct usb_context *usb_context = urb->context;
- struct usbnet *dev = usb_context->dev;
- int status = urb->status;
-
- check_warn(status, "async callback failed with %d\n", status);
-
- kfree(usb_context);
- usb_free_urb(urb);
-}
-
static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
u32 *data)
{
- struct usb_context *usb_context;
- int status;
- struct urb *urb;
const u16 size = 4;
+ int ret;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_warn(dev->net, "Error allocating URB\n");
- return -ENOMEM;
- }
-
- usb_context = kmalloc(sizeof(struct usb_context), GFP_ATOMIC);
- if (usb_context == NULL) {
- netdev_warn(dev->net, "Error allocating control msg\n");
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- usb_context->req.bRequestType =
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- usb_context->req.bRequest = USB_VENDOR_REQUEST_WRITE_REGISTER;
- usb_context->req.wValue = 00;
- usb_context->req.wIndex = cpu_to_le16(index);
- usb_context->req.wLength = cpu_to_le16(size);
-
- usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
- (void *)&usb_context->req, data, size,
- smsc95xx_async_cmd_callback,
- (void *)usb_context);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
- status);
- kfree(usb_context);
- usb_free_urb(urb);
- }
-
- return status;
+ ret = usbnet_write_cmd_async(dev, USB_VENDOR_REQUEST_WRITE_REGISTER,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, index, data, size);
+ if (ret < 0)
+ netdev_warn(dev->net, "Error write async cmd, sts=%d\n",
+ ret);
+ return ret;
}
/* returns hash bit number for given MAC address
@@ -765,7 +744,7 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
}
/* Starts the Receive path */
-static int smsc95xx_start_rx_path(struct usbnet *dev)
+static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
unsigned long flags;
@@ -775,7 +754,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
pdata->mac_cr |= MAC_CR_RXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+ ret = __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
return 0;
@@ -994,7 +973,7 @@ static int smsc95xx_reset(struct usbnet *dev)
ret = smsc95xx_start_tx_path(dev);
check_warn_return(ret, "Failed to start TX path");
- ret = smsc95xx_start_rx_path(dev);
+ ret = smsc95xx_start_rx_path(dev, 0);
check_warn_return(ret, "Failed to start RX path");
netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
@@ -1017,6 +996,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = NULL;
+ u32 val;
int ret;
printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
@@ -1047,6 +1027,15 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
/* Init all registers */
ret = smsc95xx_reset(dev);
+ /* detect device revision as different features may be available */
+ ret = smsc95xx_read_reg(dev, ID_REV, &val);
+ check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
+ val >>= 16;
+ if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9512_))
+ pdata->wuff_filter_count = LAN9500A_WUFF_NUM;
+ else
+ pdata->wuff_filter_count = LAN9500_WUFF_NUM;
+
dev->net->netdev_ops = &smsc95xx_netdev_ops;
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
@@ -1066,6 +1055,11 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
}
}
+static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
+{
+	u32 crc = bitrev16(crc16(0xFFFF, buffer, len));
+
+	return crc << ((filter % 2) * 16);
+}
+
static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1081,50 +1075,153 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
netdev_info(dev->net, "entering SUSPEND2 mode");
/* disable energy detect (link up) & wake up events */
- ret = smsc95xx_read_reg(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
check_warn_return(ret, "Error reading WUCSR");
val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
- ret = smsc95xx_write_reg(dev, WUCSR, val);
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
check_warn_return(ret, "Error writing WUCSR");
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
check_warn_return(ret, "Error reading PM_CTRL");
val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
check_warn_return(ret, "Error writing PM_CTRL");
/* enter suspend2 mode */
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
check_warn_return(ret, "Error reading PM_CTRL");
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_2;
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
check_warn_return(ret, "Error writing PM_CTRL");
return 0;
}
+ if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
+		u32 *filter_mask = kzalloc(sizeof(u32) * 32, GFP_KERNEL);
+ u32 command[2];
+ u32 offset[2];
+ u32 crc[4];
+ int i, filter = 0;
+
+		if (!filter_mask)
+			return -ENOMEM;
+
+ memset(command, 0, sizeof(command));
+ memset(offset, 0, sizeof(offset));
+ memset(crc, 0, sizeof(crc));
+
+ if (pdata->wolopts & WAKE_BCAST) {
+ const u8 bcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ netdev_info(dev->net, "enabling broadcast detection");
+ filter_mask[filter * 4] = 0x003F;
+ filter_mask[filter * 4 + 1] = 0x00;
+ filter_mask[filter * 4 + 2] = 0x00;
+ filter_mask[filter * 4 + 3] = 0x00;
+ command[filter/4] |= 0x05UL << ((filter % 4) * 8);
+ offset[filter/4] |= 0x00 << ((filter % 4) * 8);
+ crc[filter/2] |= smsc_crc(bcast, 6, filter);
+ filter++;
+ }
+
+ if (pdata->wolopts & WAKE_MCAST) {
+ const u8 mcast[] = {0x01, 0x00, 0x5E};
+ netdev_info(dev->net, "enabling multicast detection");
+ filter_mask[filter * 4] = 0x0007;
+ filter_mask[filter * 4 + 1] = 0x00;
+ filter_mask[filter * 4 + 2] = 0x00;
+ filter_mask[filter * 4 + 3] = 0x00;
+ command[filter/4] |= 0x09UL << ((filter % 4) * 8);
+ offset[filter/4] |= 0x00 << ((filter % 4) * 8);
+ crc[filter/2] |= smsc_crc(mcast, 3, filter);
+ filter++;
+ }
+
+ if (pdata->wolopts & WAKE_ARP) {
+ const u8 arp[] = {0x08, 0x06};
+ netdev_info(dev->net, "enabling ARP detection");
+ filter_mask[filter * 4] = 0x0003;
+ filter_mask[filter * 4 + 1] = 0x00;
+ filter_mask[filter * 4 + 2] = 0x00;
+ filter_mask[filter * 4 + 3] = 0x00;
+ command[filter/4] |= 0x05UL << ((filter % 4) * 8);
+ offset[filter/4] |= 0x0C << ((filter % 4) * 8);
+ crc[filter/2] |= smsc_crc(arp, 2, filter);
+ filter++;
+ }
+
+ if (pdata->wolopts & WAKE_UCAST) {
+ netdev_info(dev->net, "enabling unicast detection");
+ filter_mask[filter * 4] = 0x003F;
+ filter_mask[filter * 4 + 1] = 0x00;
+ filter_mask[filter * 4 + 2] = 0x00;
+ filter_mask[filter * 4 + 3] = 0x00;
+ command[filter/4] |= 0x01UL << ((filter % 4) * 8);
+ offset[filter/4] |= 0x00 << ((filter % 4) * 8);
+ crc[filter/2] |= smsc_crc(dev->net->dev_addr, ETH_ALEN, filter);
+ filter++;
+ }
+
+ for (i = 0; i < (pdata->wuff_filter_count * 4); i++) {
+ ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
+ if (ret < 0)
+ kfree(filter_mask);
+ check_warn_return(ret, "Error writing WUFF");
+ }
+ kfree(filter_mask);
+
+ for (i = 0; i < (pdata->wuff_filter_count / 4); i++) {
+ ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
+ check_warn_return(ret, "Error writing WUFF");
+ }
+
+ for (i = 0; i < (pdata->wuff_filter_count / 4); i++) {
+ ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
+ check_warn_return(ret, "Error writing WUFF");
+ }
+
+ for (i = 0; i < (pdata->wuff_filter_count / 2); i++) {
+ ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
+ check_warn_return(ret, "Error writing WUFF");
+ }
+
+ /* clear any pending pattern match packet status */
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val |= WUCSR_WUFR_;
+
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+ }
+
if (pdata->wolopts & WAKE_MAGIC) {
/* clear any pending magic packet status */
- ret = smsc95xx_read_reg(dev, WUCSR, &val);
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
check_warn_return(ret, "Error reading WUCSR");
val |= WUCSR_MPR_;
- ret = smsc95xx_write_reg(dev, WUCSR, val);
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
check_warn_return(ret, "Error writing WUCSR");
}
- /* enable/disable magic packup wake */
- ret = smsc95xx_read_reg(dev, WUCSR, &val);
+ /* enable/disable wakeup sources */
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
check_warn_return(ret, "Error reading WUCSR");
+ if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
+ netdev_info(dev->net, "enabling pattern match wakeup");
+ val |= WUCSR_WAKE_EN_;
+ } else {
+ netdev_info(dev->net, "disabling pattern match wakeup");
+ val &= ~WUCSR_WAKE_EN_;
+ }
+
if (pdata->wolopts & WAKE_MAGIC) {
netdev_info(dev->net, "enabling magic packet wakeup");
val |= WUCSR_MPEN_;
@@ -1133,41 +1230,41 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
val &= ~WUCSR_MPEN_;
}
- ret = smsc95xx_write_reg(dev, WUCSR, val);
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
check_warn_return(ret, "Error writing WUCSR");
/* enable wol wakeup source */
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
check_warn_return(ret, "Error reading PM_CTRL");
val |= PM_CTL_WOL_EN_;
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
check_warn_return(ret, "Error writing PM_CTRL");
- /* enable receiver */
- smsc95xx_start_rx_path(dev);
+ /* enable receiver to enable frame reception */
+ smsc95xx_start_rx_path(dev, 1);
/* some wol options are enabled, so enter SUSPEND0 */
netdev_info(dev->net, "entering SUSPEND0 mode");
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
check_warn_return(ret, "Error reading PM_CTRL");
val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
val |= PM_CTL_SUS_MODE_0;
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
check_warn_return(ret, "Error writing PM_CTRL");
/* clear wol status */
val &= ~PM_CTL_WUPS_;
val |= PM_CTL_WUPS_WOL_;
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
check_warn_return(ret, "Error writing PM_CTRL");
/* read back PM_CTRL */
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
check_warn_return(ret, "Error reading PM_CTRL");
smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
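
The pattern-filter setup above packs its tables the way they are then streamed into WUFF: four 32-bit mask words per filter, one command byte per filter (four packed per word), one offset byte per filter (four per word), and one 16-bit CRC per filter (two per word), which is why the code indexes command[filter / 4], offset[filter / 4] and crc[filter / 2] with the matching shifts. A condensed sketch of just that packing arithmetic (names local to the sketch, sized for the 8-filter LAN9500A case):

	#include <linux/types.h>

	struct wuff_tables {
		u32 mask[8 * 4];	/* four mask words per filter   */
		u32 command[2];		/* one command byte per filter  */
		u32 offset[2];		/* one offset byte per filter   */
		u32 crc[4];		/* one 16-bit CRC per filter    */
	};

	static void wuff_pack_filter(struct wuff_tables *t, int filter,
				     u32 mask0, u8 command, u8 offset, u16 crc)
	{
		t->mask[filter * 4] = mask0;	/* remaining three mask words stay zero */
		t->command[filter / 4] |= (u32)command << ((filter % 4) * 8);
		t->offset[filter / 4] |= (u32)offset << ((filter % 4) * 8);
		t->crc[filter / 2] |= (u32)crc << ((filter % 2) * 16);
	}

The suspend path then writes the words to WUFF strictly in that order: all mask words first, then the command words, the offset words and finally the CRC words.
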
@@ -1184,26 +1281,26 @@ static int smsc95xx_resume(struct usb_interface *intf)
BUG_ON(!dev);
- if (pdata->wolopts & WAKE_MAGIC) {
+ if (pdata->wolopts) {
smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
- /* Disable magic packup wake */
- ret = smsc95xx_read_reg(dev, WUCSR, &val);
+ /* clear wake-up sources */
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
check_warn_return(ret, "Error reading WUCSR");
- val &= ~WUCSR_MPEN_;
+ val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
- ret = smsc95xx_write_reg(dev, WUCSR, val);
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
check_warn_return(ret, "Error writing WUCSR");
/* clear wake-up status */
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
check_warn_return(ret, "Error reading PM_CTRL");
val &= ~PM_CTL_WOL_EN_;
val |= PM_CTL_WUPS_;
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
check_warn_return(ret, "Error writing PM_CTRL");
}
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 2ff9815..1f86269 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -53,6 +53,8 @@
#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_ID_9500_ (0x9500)
+#define ID_REV_CHIP_ID_9500A_ (0x9E00)
+#define ID_REV_CHIP_ID_9512_ (0xEC00)
#define INT_STS (0x08)
#define INT_STS_TX_STOP_ (0x00020000)
@@ -203,8 +205,11 @@
#define VLAN2 (0x124)
#define WUFF (0x128)
+#define LAN9500_WUFF_NUM (4)
+#define LAN9500A_WUFF_NUM (8)
#define WUCSR (0x12C)
+#define WUCSR_WFF_PTR_RST_ (0x80000000)
#define WUCSR_GUE_ (0x00000200)
#define WUCSR_WUFR_ (0x00000040)
#define WUCSR_MPR_ (0x00000020)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index edb81ed..c04110b 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1616,6 +1616,202 @@ void usbnet_device_suggests_idle(struct usbnet *dev)
EXPORT_SYMBOL(usbnet_device_suggests_idle);
/*-------------------------------------------------------------------------*/
+static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, void *data, u16 size)
+{
+ void *buf = NULL;
+ int err = -ENOMEM;
+
+ netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
+ " value=0x%04x index=0x%04x size=%d\n",
+ cmd, reqtype, value, index, size);
+
+ if (data) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ }
+
+ err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ cmd, reqtype, value, index, buf, size,
+ USB_CTRL_GET_TIMEOUT);
+ if (err > 0 && err <= size)
+ memcpy(data, buf, err);
+ kfree(buf);
+out:
+ return err;
+}
+
+static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data,
+ u16 size)
+{
+ void *buf = NULL;
+ int err = -ENOMEM;
+
+ netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
+ " value=0x%04x index=0x%04x size=%d\n",
+ cmd, reqtype, value, index, size);
+
+ if (data) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ }
+
+ err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ cmd, reqtype, value, index, buf, size,
+ USB_CTRL_SET_TIMEOUT);
+ kfree(buf);
+
+out:
+ return err;
+}
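+
+/*
+ * Both helpers above deliberately stage the transfer through a freshly
+ * allocated buffer -- kmalloc() on the read side, kmemdup() on the write
+ * side -- before calling usb_control_msg(). That is what lets the
+ * minidrivers earlier in this series drop their per-register kmalloc of a
+ * DMA-safe buffer and pass on-stack variables instead. A sketch of a
+ * register write in that style (the request code is borrowed from the smsc
+ * drivers above purely for illustration, not part of usbnet):
+ *
+ *	static int example_write_reg(struct usbnet *dev, u16 index, u32 val)
+ *	{
+ *		__le32 tmp = cpu_to_le32(val);	// stack copy; the core kmemdup()s it
+ *
+ *		return usbnet_write_cmd(dev, USB_VENDOR_REQUEST_WRITE_REGISTER,
+ *					USB_DIR_OUT | USB_TYPE_VENDOR |
+ *					USB_RECIP_DEVICE,
+ *					0, index, &tmp, sizeof(tmp));
+ *	}
+ */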
+
+/*
+ * This function must not be called from within a suspend/resume
+ * callback, otherwise a deadlock will occur.
+ */
+int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, void *data, u16 size)
+{
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return -ENODEV;
+ ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
+ data, size);
+ usb_autopm_put_interface(dev->intf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_read_cmd);
+
+/*
+ * This function must not be called from within a suspend/resume
+ * callback, otherwise a deadlock will occur.
+ */
+int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data, u16 size)
+{
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return -ENODEV;
+ ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
+ data, size);
+ usb_autopm_put_interface(dev->intf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd);
+
+/*
+ * This function is safe to call from within a suspend/resume callback
+ * and, in general, should only be called from there.
+ */
+int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, void *data, u16 size)
+{
+ return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
+ data, size);
+}
+EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
+
+/*
+ * This function is safe to call from within a suspend/resume callback
+ * and, in general, should only be called from there.
+ */
+int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data,
+ u16 size)
+{
+ return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
+ data, size);
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
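
The _nopm variants differ from the plain ones only in skipping usb_autopm_get_interface(), so they are the ones to call from a minidriver's suspend/resume path, where taking another autopm reference would deadlock as the comments above note; everywhere else the autopm-aware versions are appropriate. A sketch of a suspend hook using the nopm read (the request and register names reuse the smsc-style defines above only as placeholders):

	static int example_suspend(struct usb_interface *intf, pm_message_t message)
	{
		struct usbnet *dev = usb_get_intfdata(intf);
		__le32 wucsr;
		int ret;

		ret = usbnet_suspend(intf, message);
		if (ret < 0)
			return ret;

		/* must be the _nopm variant here: we are inside the suspend callback */
		ret = usbnet_read_cmd_nopm(dev, USB_VENDOR_REQUEST_READ_REGISTER,
					   USB_DIR_IN | USB_TYPE_VENDOR |
					   USB_RECIP_DEVICE,
					   0, WUCSR, &wucsr, sizeof(wucsr));
		return ret < 0 ? ret : 0;
	}
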
+
+static void usbnet_async_cmd_cb(struct urb *urb)
+{
+ struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
+ int status = urb->status;
+
+ if (status < 0)
+ dev_dbg(&urb->dev->dev, "%s failed with %d",
+ __func__, status);
+
+ kfree(req);
+ usb_free_urb(urb);
+}
+
+/*
+ * The caller must make sure that the device cannot be put into
+ * suspend state until the control URB completes.
+ */
+int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data, u16 size)
+{
+ struct usb_ctrlrequest *req = NULL;
+ struct urb *urb;
+ int err = -ENOMEM;
+ void *buf = NULL;
+
+	netdev_dbg(dev->net, "usbnet_write_cmd_async cmd=0x%02x reqtype=%02x"
+ " value=0x%04x index=0x%04x size=%d\n",
+ cmd, reqtype, value, index, size);
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(dev->net, "Error allocating URB in"
+ " %s!\n", __func__);
+ goto fail;
+ }
+
+ if (data) {
+ buf = kmemdup(data, size, GFP_ATOMIC);
+ if (!buf) {
+ netdev_err(dev->net, "Error allocating buffer"
+ " in %s!\n", __func__);
+ goto fail_free;
+ }
+ }
+
+ req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ if (!req) {
+ netdev_err(dev->net, "Failed to allocate memory for %s\n",
+ __func__);
+ goto fail_free_buf;
+ }
+
+ req->bRequestType = reqtype;
+ req->bRequest = cmd;
+ req->wValue = cpu_to_le16(value);
+ req->wIndex = cpu_to_le16(index);
+ req->wLength = cpu_to_le16(size);
+
+ usb_fill_control_urb(urb, dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ (void *)req, buf, size,
+ usbnet_async_cmd_cb, req);
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ netdev_err(dev->net, "Error submitting the control"
+ " message: status=%d\n", err);
+ goto fail_free;
+ }
+ return 0;
+
+fail_free_buf:
+ kfree(buf);
+fail_free:
+ kfree(req);
+ usb_free_urb(urb);
+fail:
+ return err;
+
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
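
usbnet_write_cmd_async() copies the payload with kmemdup(GFP_ATOMIC), marks the URB URB_FREE_BUFFER and frees the request from the completion callback, so it can be fired from atomic context with nothing for the caller to clean up afterwards; the caller's only obligation, per the comment above, is to keep the device out of suspend until the URB completes. A fire-and-forget sketch (the register define is borrowed from the smsc drivers above as a placeholder):

	static void example_async_reg_write(struct usbnet *dev, u16 index, u32 val)
	{
		__le32 tmp = cpu_to_le32(val);	/* copied by kmemdup(), so a stack
						 * temporary is safe even though the
						 * transfer completes later */

		if (usbnet_write_cmd_async(dev, USB_VENDOR_REQUEST_WRITE_REGISTER,
					   USB_DIR_OUT | USB_TYPE_VENDOR |
					   USB_RECIP_DEVICE,
					   0, index, &tmp, sizeof(tmp)) < 0)
			netdev_warn(dev->net, "async register write failed\n");
	}
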
+/*-------------------------------------------------------------------------*/
static int __init usbnet_init(void)
{
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e522ff7..24f6b27 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -264,6 +264,7 @@ static void veth_setup(struct net_device *dev)
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbf8b06..26c502e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -212,8 +212,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
* the case of a broken device.
*/
if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
- if (net_ratelimit())
- pr_debug("%s: too much data\n", skb->dev->name);
+ net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
dev_kfree_skb(skb);
return NULL;
}
@@ -333,9 +332,8 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
default:
- if (net_ratelimit())
- printk(KERN_WARNING "%s: bad gso type %u.\n",
- dev->name, hdr->hdr.gso_type);
+ net_warn_ratelimited("%s: bad gso type %u.\n",
+ dev->name, hdr->hdr.gso_type);
goto frame_err;
}
@@ -344,9 +342,7 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
- if (net_ratelimit())
- printk(KERN_WARNING "%s: zero gso size.\n",
- dev->name);
+ net_warn_ratelimited("%s: zero gso size.\n", dev->name);
goto frame_err;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0ae1bcc..e4a192b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1094,10 +1094,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
- return vmxnet3_tq_xmit(skb,
- &adapter->tx_queue[skb->queue_mapping],
- adapter, netdev);
+ BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
+ return vmxnet3_tq_xmit(skb,
+ &adapter->tx_queue[skb->queue_mapping],
+ adapter, netdev);
}
@@ -1243,8 +1243,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skb_reserve(new_skb, NET_IP_ALIGN);
rbi->skb = new_skb;
rbi->dma_addr = pci_map_single(adapter->pdev,
- rbi->skb->data, rbi->len,
- PCI_DMA_FROMDEVICE);
+ rbi->skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
@@ -1331,14 +1331,14 @@ rcd_done:
/* if needed, update the register */
if (unlikely(rq->shared->updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(adapter,
- rxprod_reg[ring_idx] + rq->qid * 8,
- ring->next2fill);
+ rxprod_reg[ring_idx] + rq->qid * 8,
+ ring->next2fill);
rq->uncommitted[ring_idx] = 0;
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
vmxnet3_getRxComp(rcd,
- &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
+ &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
}
return num_rxd;
@@ -1922,7 +1922,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
free_irq(adapter->pdev->irq, adapter->netdev);
break;
default:
- BUG_ON(true);
+ BUG();
}
}
@@ -2949,11 +2949,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
spin_lock_init(&adapter->cmd_lock);
adapter->shared = pci_alloc_consistent(adapter->pdev,
- sizeof(struct Vmxnet3_DriverShared),
- &adapter->shared_pa);
+ sizeof(struct Vmxnet3_DriverShared),
+ &adapter->shared_pa);
if (!adapter->shared) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
+ pci_name(pdev));
err = -ENOMEM;
goto err_alloc_shared;
}
@@ -2964,16 +2964,16 @@ vmxnet3_probe_device(struct pci_dev *pdev,
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
- &adapter->queue_desc_pa);
+ &adapter->queue_desc_pa);
if (!adapter->tqd_start) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
+ pci_name(pdev));
err = -ENOMEM;
goto err_alloc_queue_desc;
}
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
- adapter->num_tx_queues);
+ adapter->num_tx_queues);
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
if (adapter->pm_conf == NULL) {
@@ -3019,7 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->dev_number = atomic_read(&devices_found);
- adapter->share_intr = irq_share_mode;
+ adapter->share_intr = irq_share_mode;
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
adapter->num_tx_queues != adapter->num_rx_queues)
adapter->share_intr = VMXNET3_INTR_DONTSHARE;
@@ -3065,7 +3065,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (err) {
printk(KERN_ERR "Failed to register adapter %s\n",
- pci_name(pdev));
+ pci_name(pdev));
goto err_register;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8b5c619..6898a79 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -769,7 +769,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_set_owner(dev, skb);
- /* See __IPTUNNEL_XMIT */
+ /* See iptunnel_xmit() */
skb->ip_summed = CHECKSUM_NONE;
ip_select_ident(iph, &rt->dst, NULL);
@@ -1111,6 +1111,9 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_TOS])
vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
+ if (data[IFLA_VXLAN_TTL])
+ vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+
if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
vxlan->learn = true;
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index eac709b..df70248 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -52,9 +52,9 @@ endif
quiet_cmd_build_wanxlfw = BLD FW $@
cmd_build_wanxlfw = \
- $(CPP) -Wp,-MD,$(depfile) -I$(srctree)/include $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
+ $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
$(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
- hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
+ hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
$(obj)/wanxlfw.inc: $(src)/wanxlfw.S
diff --git a/drivers/net/wan/wanxlfw.S b/drivers/net/wan/wanxlfw.S
index 73aae2b..21565d5 100644
--- a/drivers/net/wan/wanxlfw.S
+++ b/drivers/net/wan/wanxlfw.S
@@ -35,6 +35,7 @@
*/
#include <linux/hdlc.h>
+#include <linux/hdlc/ioctl.h>
#include "wanxl.h"
/* memory addresses and offsets */
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 3cd05a71..57f7db1 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -7433,7 +7433,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
num_null_ies++;
break;
- case WLAN_EID_GENERIC:
+ case WLAN_EID_VENDOR_SPECIFIC:
if (ie[1] >= 4 &&
ie[2] == 0x00 &&
ie[3] == 0x50 &&
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 0960224..c25dcf1 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -26,5 +26,6 @@ source "drivers/net/wireless/ath/ath5k/Kconfig"
source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
source "drivers/net/wireless/ath/ath6kl/Kconfig"
+source "drivers/net/wireless/ath/ar5523/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index d716b74..1e18621 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_ATH5K) += ath5k/
obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
obj-$(CONFIG_ATH6KL) += ath6kl/
+obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
new file mode 100644
index 0000000..11d99ee
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -0,0 +1,7 @@
+config AR5523
+ tristate "Atheros AR5523 wireless driver support"
+ depends on MAC80211 && USB
+ select FW_LOADER
+ ---help---
+	  This module adds support for AR5523-based USB dongles such as the
+	  D-Link DWL-G132, Netgear WPN111 and many more.
diff --git a/drivers/net/wireless/ath/ar5523/Makefile b/drivers/net/wireless/ath/ar5523/Makefile
new file mode 100644
index 0000000..ebf7f3b
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AR5523) := ar5523.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
new file mode 100644
index 0000000..f782b6e
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -0,0 +1,1806 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This driver is based on the uath driver that Damien Bergamini wrote for
+ * OpenBSD after doing black-box analysis of the Windows binary driver to
+ * find out how the hardware works. It contains a lot of magic numbers
+ * because of that and only has minimal functionality.
+ */
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
+
+#include "ar5523.h"
+#include "ar5523_hw.h"
+
+/*
+ * Various supported device vendors/products.
+ * UB51: AR5005UG 802.11b/g, UB52: AR5005UX 802.11a/b/g
+ */
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar);
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar);
+
+static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
+ struct ar5523_tx_cmd *cmd)
+{
+ int dlen, olen;
+ u32 *rp;
+
+ dlen = hdr->len - sizeof(*hdr);
+
+ if (dlen < 0) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ ar5523_dbg(ar, "Code = %d len = %d\n", hdr->code & 0xff, dlen);
+
+ rp = (u32 *)(hdr + 1);
+ if (dlen >= sizeof(u32)) {
+ olen = be32_to_cpu(rp[0]);
+ dlen -= sizeof(u32);
+ if (olen == 0) {
+			/* by convention, 0 means one word */
+ olen = sizeof(u32);
+ }
+ } else
+ olen = 0;
+
+ if (cmd->odata) {
+ if (cmd->olen < olen) {
+ ar5523_err(ar, "olen to small %d < %d\n",
+ cmd->olen, olen);
+ cmd->olen = 0;
+ cmd->res = -EOVERFLOW;
+ } else {
+ cmd->olen = olen;
+ memcpy(cmd->odata, &rp[1], olen);
+ cmd->res = 0;
+ }
+ }
+
+out:
+ complete(&cmd->done);
+}
+
+static void ar5523_cmd_rx_cb(struct urb *urb)
+{
+ struct ar5523 *ar = urb->context;
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+ struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf;
+ int dlen;
+
+ if (urb->status) {
+ if (urb->status != -ESHUTDOWN)
+ ar5523_err(ar, "RX USB error %d.\n", urb->status);
+ goto skip;
+ }
+
+ if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
+ ar5523_err(ar, "RX USB to short.\n");
+ goto skip;
+ }
+
+ ar5523_dbg(ar, "%s code %02x priv %d\n", __func__,
+ be32_to_cpu(hdr->code) & 0xff, hdr->priv);
+
+ hdr->code = be32_to_cpu(hdr->code);
+ hdr->len = be32_to_cpu(hdr->len);
+
+ switch (hdr->code & 0xff) {
+ default:
+ /* reply to a read command */
+ if (hdr->priv != AR5523_CMD_ID) {
+ ar5523_err(ar, "Unexpected command id: %02x\n",
+ hdr->code & 0xff);
+ goto skip;
+ }
+ ar5523_read_reply(ar, hdr, cmd);
+ break;
+
+ case WDCMSG_DEVICE_AVAIL:
+ ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n");
+ cmd->res = 0;
+ cmd->olen = 0;
+ complete(&cmd->done);
+ break;
+
+ case WDCMSG_SEND_COMPLETE:
+ ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n",
+ atomic_read(&ar->tx_nr_pending));
+ if (!test_bit(AR5523_HW_UP, &ar->flags))
+ ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n");
+ else {
+ mod_timer(&ar->tx_wd_timer,
+ jiffies + AR5523_TX_WD_TIMEOUT);
+ ar5523_data_tx_pkt_put(ar);
+
+ }
+ break;
+
+ case WDCMSG_TARGET_START:
+		/*
+		 * This command returns a bogus id so it needs special
+		 * handling.
+		 */
+ dlen = hdr->len - sizeof(*hdr);
+ if (dlen != (int)sizeof(u32)) {
+ ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
+ return;
+ }
+ memcpy(cmd->odata, hdr + 1, sizeof(u32));
+ cmd->olen = sizeof(u32);
+ cmd->res = 0;
+ complete(&cmd->done);
+ break;
+
+ case WDCMSG_STATS_UPDATE:
+ ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n");
+ break;
+ }
+
+skip:
+ ar5523_submit_rx_cmd(ar);
+}
+
+static int ar5523_alloc_rx_cmd(struct ar5523 *ar)
+{
+ ar->rx_cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ar->rx_cmd_urb)
+ return -ENOMEM;
+
+ ar->rx_cmd_buf = usb_alloc_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+ GFP_KERNEL,
+ &ar->rx_cmd_urb->transfer_dma);
+ if (!ar->rx_cmd_buf) {
+ usb_free_urb(ar->rx_cmd_urb);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void ar5523_cancel_rx_cmd(struct ar5523 *ar)
+{
+ usb_kill_urb(ar->rx_cmd_urb);
+}
+
+static void ar5523_free_rx_cmd(struct ar5523 *ar)
+{
+ usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+ ar->rx_cmd_buf, ar->rx_cmd_urb->transfer_dma);
+ usb_free_urb(ar->rx_cmd_urb);
+}
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar)
+{
+ int error;
+
+ usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev,
+ ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf,
+ AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar);
+ ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC);
+ if (error) {
+ if (error != -ENODEV)
+ ar5523_err(ar, "error %d when submitting rx urb\n",
+ error);
+ return error;
+ }
+ return 0;
+}
+
+/*
+ * Command submitted cb
+ */
+static void ar5523_cmd_tx_cb(struct urb *urb)
+{
+ struct ar5523_tx_cmd *cmd = urb->context;
+ struct ar5523 *ar = cmd->ar;
+
+ if (urb->status) {
+ ar5523_err(ar, "Failed to TX command. Status = %d\n",
+ urb->status);
+ cmd->res = urb->status;
+ complete(&cmd->done);
+ return;
+ }
+
+ if (!(cmd->flags & AR5523_CMD_FLAG_READ)) {
+ cmd->res = 0;
+ complete(&cmd->done);
+ }
+}
+
+static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+{
+ struct ar5523_cmd_hdr *hdr;
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+ int xferlen, error;
+
+ /* always bulk-out a multiple of 4 bytes */
+ xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3;
+
+ hdr = (struct ar5523_cmd_hdr *)cmd->buf_tx;
+ memset(hdr, 0, sizeof(struct ar5523_cmd_hdr));
+ hdr->len = cpu_to_be32(xferlen);
+ hdr->code = cpu_to_be32(code);
+ hdr->priv = AR5523_CMD_ID;
+
+ if (flags & AR5523_CMD_FLAG_MAGIC)
+ hdr->magic = cpu_to_be32(1 << 24);
+ memcpy(hdr + 1, idata, ilen);
+
+ cmd->odata = odata;
+ cmd->olen = olen;
+ cmd->flags = flags;
+
+ ar5523_dbg(ar, "do cmd %02x\n", code);
+
+ usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev),
+ cmd->buf_tx, xferlen, ar5523_cmd_tx_cb, cmd);
+ cmd->urb_tx->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = usb_submit_urb(cmd->urb_tx, GFP_KERNEL);
+ if (error) {
+ ar5523_err(ar, "could not send command 0x%x, error=%d\n",
+ code, error);
+ return error;
+ }
+
+ if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
+ cmd->odata = NULL;
+ ar5523_err(ar, "timeout waiting for command %02x reply\n",
+ code);
+ cmd->res = -ETIMEDOUT;
+ }
+ return cmd->res;
+}
+
+static int ar5523_cmd_write(struct ar5523 *ar, u32 code, const void *data,
+ int len, int flags)
+{
+ flags &= ~AR5523_CMD_FLAG_READ;
+ return ar5523_cmd(ar, code, data, len, NULL, 0, flags);
+}
+
+static int ar5523_cmd_read(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+{
+ flags |= AR5523_CMD_FLAG_READ;
+ return ar5523_cmd(ar, code, idata, ilen, odata, olen, flags);
+}
+
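+/* Configuration writes use WDCMSG_TARGET_SET_CONFIG: a config id, a length
+ * (0 denotes a single 32-bit write) and the big-endian value(s).
+ */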
+static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val)
+{
+ struct ar5523_write_mac write;
+ int error;
+
+ write.reg = cpu_to_be32(reg);
+ write.len = cpu_to_be32(0); /* 0 = single write */
+	*(__be32 *)write.data = cpu_to_be32(val);
+
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+ 3 * sizeof(u32), 0);
+ if (error != 0)
+ ar5523_err(ar, "could not write register 0x%02x\n", reg);
+ return error;
+}
+
+static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data,
+ int len)
+{
+ struct ar5523_write_mac write;
+ int error;
+
+ write.reg = cpu_to_be32(reg);
+ write.len = cpu_to_be32(len);
+ memcpy(write.data, data, len);
+
+ /* properly handle the case where len is zero (reset) */
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+ (len == 0) ? sizeof(u32) : 2 * sizeof(u32) + len, 0);
+ if (error != 0)
+ ar5523_err(ar, "could not write %d bytes to register 0x%02x\n",
+ len, reg);
+ return error;
+}
+
+static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata,
+ int olen)
+{
+ int error;
+
+ which = cpu_to_be32(which);
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS,
+ &which, sizeof(which), odata, olen, AR5523_CMD_FLAG_MAGIC);
+ if (error != 0)
+ ar5523_err(ar, "could not read EEPROM offset 0x%02x\n",
+ be32_to_cpu(which));
+ return error;
+}
+
+static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val)
+{
+ int error;
+
+ cap = cpu_to_be32(cap);
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY,
+ &cap, sizeof(cap), val, sizeof(u32), AR5523_CMD_FLAG_MAGIC);
+ if (error != 0) {
+ ar5523_err(ar, "could not read capability %u\n",
+ be32_to_cpu(cap));
+ return error;
+ }
+ *val = be32_to_cpu(*val);
+ return error;
+}
+
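+/* Read out the capability values advertised by the target. For now they are
+ * only logged; the driver does not act on them.
+ */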
+static int ar5523_get_devcap(struct ar5523 *ar)
+{
+#define GETCAP(x) do { \
+ error = ar5523_get_capability(ar, x, &cap); \
+ if (error != 0) \
+ return error; \
+	ar5523_info(ar, "Cap: %s=0x%08x\n", #x, cap);		\
+} while (0)
+ int error;
+ u32 cap;
+
+ /* collect device capabilities */
+ GETCAP(CAP_TARGET_VERSION);
+ GETCAP(CAP_TARGET_REVISION);
+ GETCAP(CAP_MAC_VERSION);
+ GETCAP(CAP_MAC_REVISION);
+ GETCAP(CAP_PHY_REVISION);
+ GETCAP(CAP_ANALOG_5GHz_REVISION);
+ GETCAP(CAP_ANALOG_2GHz_REVISION);
+
+ GETCAP(CAP_REG_DOMAIN);
+ GETCAP(CAP_REG_CAP_BITS);
+ GETCAP(CAP_WIRELESS_MODES);
+ GETCAP(CAP_CHAN_SPREAD_SUPPORT);
+ GETCAP(CAP_COMPRESS_SUPPORT);
+ GETCAP(CAP_BURST_SUPPORT);
+ GETCAP(CAP_FAST_FRAMES_SUPPORT);
+ GETCAP(CAP_CHAP_TUNING_SUPPORT);
+ GETCAP(CAP_TURBOG_SUPPORT);
+ GETCAP(CAP_TURBO_PRIME_SUPPORT);
+ GETCAP(CAP_DEVICE_TYPE);
+ GETCAP(CAP_WME_SUPPORT);
+ GETCAP(CAP_TOTAL_QUEUES);
+ GETCAP(CAP_CONNECTION_ID_MAX);
+
+ GETCAP(CAP_LOW_5GHZ_CHAN);
+ GETCAP(CAP_HIGH_5GHZ_CHAN);
+ GETCAP(CAP_LOW_2GHZ_CHAN);
+ GETCAP(CAP_HIGH_2GHZ_CHAN);
+ GETCAP(CAP_TWICE_ANTENNAGAIN_5G);
+ GETCAP(CAP_TWICE_ANTENNAGAIN_2G);
+
+ GETCAP(CAP_CIPHER_AES_CCM);
+ GETCAP(CAP_CIPHER_TKIP);
+ GETCAP(CAP_MIC_TKIP);
+ return 0;
+}
+
+static int ar5523_set_ledsteady(struct ar5523 *ar, int lednum, int ledmode)
+{
+ struct ar5523_cmd_ledsteady led;
+
+ led.lednum = cpu_to_be32(lednum);
+ led.ledmode = cpu_to_be32(ledmode);
+
+ ar5523_dbg(ar, "set %s led %s (steady)\n",
+ (lednum == UATH_LED_LINK) ? "link" : "activity",
+ ledmode ? "on" : "off");
+ return ar5523_cmd_write(ar, WDCMSG_SET_LED_STEADY, &led, sizeof(led),
+ 0);
+}
+
+static int ar5523_set_rxfilter(struct ar5523 *ar, u32 bits, u32 op)
+{
+ struct ar5523_cmd_rx_filter rxfilter;
+
+ rxfilter.bits = cpu_to_be32(bits);
+ rxfilter.op = cpu_to_be32(op);
+
+ ar5523_dbg(ar, "setting Rx filter=0x%x flags=0x%x\n", bits, op);
+ return ar5523_cmd_write(ar, WDCMSG_RX_FILTER, &rxfilter,
+ sizeof(rxfilter), 0);
+}
+
+static int ar5523_reset_tx_queues(struct ar5523 *ar)
+{
+ __be32 qid = cpu_to_be32(0);
+
+ ar5523_dbg(ar, "resetting Tx queue\n");
+ return ar5523_cmd_write(ar, WDCMSG_RELEASE_TX_QUEUE,
+ &qid, sizeof(qid), 0);
+}
+
+static int ar5523_set_chan(struct ar5523 *ar)
+{
+	struct ieee80211_conf *conf = &ar->hw->conf;
+	struct ar5523_cmd_reset reset;
+
+ memset(&reset, 0, sizeof(reset));
+ reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ);
+ reset.flags |= cpu_to_be32(UATH_CHAN_OFDM);
+ reset.freq = cpu_to_be32(conf->channel->center_freq);
+ reset.maxrdpower = cpu_to_be32(50); /* XXX */
+ reset.channelchange = cpu_to_be32(1);
+ reset.keeprccontent = cpu_to_be32(0);
+
+ ar5523_dbg(ar, "set chan flags 0x%x freq %d\n",
+ be32_to_cpu(reset.flags),
+ conf->channel->center_freq);
+ return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0);
+}
+
+static int ar5523_queue_init(struct ar5523 *ar)
+{
+ struct ar5523_cmd_txq_setup qinfo;
+
+ ar5523_dbg(ar, "setting up Tx queue\n");
+ qinfo.qid = cpu_to_be32(0);
+ qinfo.len = cpu_to_be32(sizeof(qinfo.attr));
+ qinfo.attr.priority = cpu_to_be32(0); /* XXX */
+ qinfo.attr.aifs = cpu_to_be32(3);
+ qinfo.attr.logcwmin = cpu_to_be32(4);
+ qinfo.attr.logcwmax = cpu_to_be32(10);
+ qinfo.attr.bursttime = cpu_to_be32(0);
+ qinfo.attr.mode = cpu_to_be32(0);
+ qinfo.attr.qflags = cpu_to_be32(1); /* XXX? */
+ return ar5523_cmd_write(ar, WDCMSG_SETUP_TX_QUEUE, &qinfo,
+ sizeof(qinfo), 0);
+}
+
+static int ar5523_switch_chan(struct ar5523 *ar)
+{
+ int error;
+
+ error = ar5523_set_chan(ar);
+ if (error) {
+ ar5523_err(ar, "could not set chan, error %d\n", error);
+ goto out_err;
+ }
+
+ /* reset Tx rings */
+ error = ar5523_reset_tx_queues(ar);
+ if (error) {
+ ar5523_err(ar, "could not reset Tx queues, error %d\n",
+ error);
+ goto out_err;
+ }
+ /* set Tx rings WME properties */
+ error = ar5523_queue_init(ar);
+ if (error)
+ ar5523_err(ar, "could not init wme, error %d\n", error);
+
+out_err:
+ return error;
+}
+
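+/* RX data buffers are kept on two lists: rx_data_free holds buffers without
+ * an URB in flight, rx_data_used holds submitted ones. Completed buffers go
+ * back to the free list and the refill worker resubmits them.
+ */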
+static void ar5523_rx_data_put(struct ar5523 *ar,
+ struct ar5523_rx_data *data)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ list_move(&data->list, &ar->rx_data_free);
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+}
+
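+/* A received data URB carries an ar5523_chunk header, the 802.11 frame and,
+ * at the very end of the transfer, an ar5523_rx_desc with status, RSSI and
+ * channel information.
+ */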
+static void ar5523_data_rx_cb(struct urb *urb)
+{
+ struct ar5523_rx_data *data = urb->context;
+ struct ar5523 *ar = data->ar;
+ struct ar5523_rx_desc *desc;
+ struct ar5523_chunk *chunk;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_rx_status *rx_status;
+ u32 rxlen;
+ int usblen = urb->actual_length;
+ int hdrlen, pad;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (urb->status != -ESHUTDOWN)
+ ar5523_err(ar, "%s: USB err: %d\n", __func__,
+ urb->status);
+ goto skip;
+ }
+
+ if (usblen < AR5523_MIN_RXBUFSZ) {
+ ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen);
+ goto skip;
+ }
+
+ chunk = (struct ar5523_chunk *) data->skb->data;
+
+ if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) ||
+ chunk->seqnum != 0) {
+ ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n",
+ chunk->seqnum, chunk->flags,
+ be16_to_cpu(chunk->length));
+ goto skip;
+ }
+
+ /* Rx descriptor is located at the end, 32-bit aligned */
+ desc = (struct ar5523_rx_desc *)
+ (data->skb->data + usblen - sizeof(struct ar5523_rx_desc));
+
+ rxlen = be32_to_cpu(desc->len);
+ if (rxlen > ar->rxbufsz) {
+ ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n",
+ be32_to_cpu(desc->len));
+ goto skip;
+ }
+
+ if (!rxlen) {
+ ar5523_dbg(ar, "RX: rxlen is 0\n");
+ goto skip;
+ }
+
+ if (be32_to_cpu(desc->status) != 0) {
+ ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n",
+ be32_to_cpu(desc->status), be32_to_cpu(desc->len));
+ goto skip;
+ }
+
+ skb_reserve(data->skb, sizeof(*chunk));
+ skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
+ if (!IS_ALIGNED(hdrlen, 4)) {
+ ar5523_dbg(ar, "eek, alignment workaround activated\n");
+ pad = ALIGN(hdrlen, 4) - hdrlen;
+ memmove(data->skb->data + pad, data->skb->data, hdrlen);
+ skb_pull(data->skb, pad);
+ skb_put(data->skb, pad);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(data->skb);
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->freq = be32_to_cpu(desc->channel);
+ rx_status->band = hw->conf.channel->band;
+ rx_status->signal = -95 + be32_to_cpu(desc->rssi);
+
+ ieee80211_rx_irqsafe(hw, data->skb);
+ data->skb = NULL;
+
+skip:
+ if (data->skb) {
+ dev_kfree_skb_irq(data->skb);
+ data->skb = NULL;
+ }
+
+ ar5523_rx_data_put(ar, data);
+ if (atomic_inc_return(&ar->rx_data_free_cnt) >=
+ AR5523_RX_DATA_REFILL_COUNT &&
+ test_bit(AR5523_HW_UP, &ar->flags))
+ queue_work(ar->wq, &ar->rx_refill_work);
+}
+
+static void ar5523_rx_refill_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work);
+ struct ar5523_rx_data *data;
+ unsigned long flags;
+ int error;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ do {
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+
+ if (!list_empty(&ar->rx_data_free))
+ data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+ else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+ if (!data)
+ goto done;
+
+ data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL);
+ if (!data->skb) {
+ ar5523_err(ar, "could not allocate rx skbuff\n");
+ return;
+ }
+
+ usb_fill_bulk_urb(data->urb, ar->dev,
+ ar5523_data_rx_pipe(ar->dev), data->skb->data,
+ ar->rxbufsz, ar5523_data_rx_cb, data);
+
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ list_move(&data->list, &ar->rx_data_used);
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+ atomic_dec(&ar->rx_data_free_cnt);
+
+ error = usb_submit_urb(data->urb, GFP_KERNEL);
+ if (error) {
+ kfree_skb(data->skb);
+ if (error != -ENODEV)
+ ar5523_err(ar, "Err sending rx data urb %d\n",
+ error);
+ ar5523_rx_data_put(ar, data);
+ atomic_inc(&ar->rx_data_free_cnt);
+ return;
+ }
+
+ } while (true);
+done:
+ return;
+}
+
+static void ar5523_cancel_rx_bufs(struct ar5523 *ar)
+{
+ struct ar5523_rx_data *data;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ if (!list_empty(&ar->rx_data_used))
+ data = (struct ar5523_rx_data *) ar->rx_data_used.next;
+ else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+ if (!data)
+ break;
+
+ usb_kill_urb(data->urb);
+ list_move(&data->list, &ar->rx_data_free);
+ atomic_inc(&ar->rx_data_free_cnt);
+ } while (data);
+}
+
+static void ar5523_free_rx_bufs(struct ar5523 *ar)
+{
+ struct ar5523_rx_data *data;
+
+ ar5523_cancel_rx_bufs(ar);
+ while (!list_empty(&ar->rx_data_free)) {
+ data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+ list_del(&data->list);
+ usb_free_urb(data->urb);
+ }
+}
+
+static int ar5523_alloc_rx_bufs(struct ar5523 *ar)
+{
+ int i;
+
+ for (i = 0; i < AR5523_RX_DATA_COUNT; i++) {
+ struct ar5523_rx_data *data = &ar->rx_data[i];
+
+ data->ar = ar;
+ data->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!data->urb) {
+ ar5523_err(ar, "could not allocate rx data urb\n");
+ goto err;
+ }
+ list_add_tail(&data->list, &ar->rx_data_free);
+ atomic_inc(&ar->rx_data_free_cnt);
+ }
+ return 0;
+
+err:
+ ar5523_free_rx_bufs(ar);
+ return -ENOMEM;
+}
+
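+/* TX accounting: tx_nr_total counts queued plus submitted frames and drives
+ * mac80211 queue flow control; tx_nr_pending counts URBs handed to the
+ * device that have not completed yet (normally via WDCMSG_SEND_COMPLETE).
+ */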
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar)
+{
+ atomic_dec(&ar->tx_nr_total);
+ if (!atomic_dec_return(&ar->tx_nr_pending)) {
+ del_timer(&ar->tx_wd_timer);
+ wake_up(&ar->tx_flush_waitq);
+ }
+
+ if (atomic_read(&ar->tx_nr_total) < AR5523_TX_DATA_RESTART_COUNT) {
+ ar5523_dbg(ar, "restart tx queue\n");
+ ieee80211_wake_queues(ar->hw);
+ }
+}
+
+static void ar5523_data_tx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+ txi->driver_data;
+ struct ar5523 *ar = data->ar;
+ unsigned long flags;
+
+ ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status);
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_del(&data->list);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ if (urb->status) {
+ ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status);
+ ar5523_data_tx_pkt_put(ar);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32));
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+ }
+ usb_free_urb(urb);
+}
+
+static void ar5523_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+ txi->driver_data;
+ struct ar5523 *ar = hw->priv;
+ unsigned long flags;
+
+ ar5523_dbg(ar, "tx called\n");
+ if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) {
+ ar5523_dbg(ar, "tx queue full\n");
+ ar5523_dbg(ar, "stop queues (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+ ieee80211_stop_queues(hw);
+ }
+
+ data->skb = skb;
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_add_tail(&data->list, &ar->tx_queue_pending);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ ieee80211_queue_work(ar->hw, &ar->tx_work);
+}
+
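+/* Drain tx_queue_pending: each skb gets an ar5523_chunk and an
+ * ar5523_tx_desc (WDCMSG_SEND) prepended and is sent as a single bulk-out
+ * URB on the data pipe. Submitted frames move to tx_queue_submitted until
+ * their URB completes.
+ */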
+static void ar5523_tx_work_locked(struct ar5523 *ar)
+{
+ struct ar5523_tx_data *data;
+ struct ar5523_tx_desc *desc;
+ struct ar5523_chunk *chunk;
+ struct ieee80211_tx_info *txi;
+ struct urb *urb;
+ struct sk_buff *skb;
+ int error = 0, paylen;
+ u32 txqid;
+ unsigned long flags;
+
+ BUILD_BUG_ON(sizeof(struct ar5523_tx_data) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ do {
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ if (!list_empty(&ar->tx_queue_pending)) {
+ data = (struct ar5523_tx_data *)
+ ar->tx_queue_pending.next;
+ list_del(&data->list);
+ } else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ if (!data)
+ break;
+
+ skb = data->skb;
+ txqid = 0;
+ txi = IEEE80211_SKB_CB(skb);
+ paylen = skb->len;
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ ar5523_err(ar, "Failed to allocate TX urb\n");
+ ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+
+ data->ar = ar;
+ data->urb = urb;
+
+ desc = (struct ar5523_tx_desc *)skb_push(skb, sizeof(*desc));
+ chunk = (struct ar5523_chunk *)skb_push(skb, sizeof(*chunk));
+
+ chunk->seqnum = 0;
+ chunk->flags = UATH_CFLAGS_FINAL;
+ chunk->length = cpu_to_be16(skb->len);
+
+ desc->msglen = cpu_to_be32(skb->len);
+ desc->msgid = AR5523_DATA_ID;
+ desc->buflen = cpu_to_be32(paylen);
+ desc->type = cpu_to_be32(WDCMSG_SEND);
+ desc->flags = cpu_to_be32(UATH_TX_NOTIFY);
+
+ if (test_bit(AR5523_CONNECTED, &ar->flags))
+ desc->connid = cpu_to_be32(AR5523_ID_BSS);
+ else
+ desc->connid = cpu_to_be32(AR5523_ID_BROADCAST);
+
+ if (txi->flags & IEEE80211_TX_CTL_USE_MINRATE)
+ txqid |= UATH_TXQID_MINRATE;
+
+ desc->txqid = cpu_to_be32(txqid);
+
+ urb->transfer_flags = URB_ZERO_PACKET;
+ usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev),
+ skb->data, skb->len, ar5523_data_tx_cb, skb);
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_add_tail(&data->list, &ar->tx_queue_submitted);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+ mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT);
+ atomic_inc(&ar->tx_nr_pending);
+
+ ar5523_dbg(ar, "TX Frame (%d pending)\n",
+ atomic_read(&ar->tx_nr_pending));
+ error = usb_submit_urb(urb, GFP_KERNEL);
+ if (error) {
+ ar5523_err(ar, "error %d when submitting tx urb\n",
+ error);
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_del(&data->list);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+ atomic_dec(&ar->tx_nr_pending);
+ ar5523_data_tx_pkt_put(ar);
+ usb_free_urb(urb);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ } while (true);
+}
+
+static void ar5523_tx_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, tx_work);
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ mutex_lock(&ar->mutex);
+ ar5523_tx_work_locked(ar);
+ mutex_unlock(&ar->mutex);
+}
+
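+/* TX watchdog: (re)armed whenever a data URB is submitted and stopped once
+ * nothing is pending. If it fires, the queues are assumed stuck and a
+ * target reset is issued from a work item.
+ */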
+static void ar5523_tx_wd_timer(unsigned long arg)
+{
+ struct ar5523 *ar = (struct ar5523 *) arg;
+
+ ar5523_dbg(ar, "TX watchdog timer triggered\n");
+ ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
+}
+
+static void ar5523_tx_wd_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work);
+
+ /* Occasionally the TX queues stop responding. The only way to
+ * recover seems to be to reset the dongle.
+ */
+
+ mutex_lock(&ar->mutex);
+ ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+
+ ar5523_err(ar, "Will restart dongle.\n");
+ ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0);
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar5523_flush_tx(struct ar5523 *ar)
+{
+ ar5523_tx_work_locked(ar);
+
+ /* Don't waste time trying to flush if USB is disconnected */
+ if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags))
+ return;
+ if (!wait_event_timeout(ar->tx_flush_waitq,
+ !atomic_read(&ar->tx_nr_pending), AR5523_FLUSH_TIMEOUT))
+ ar5523_err(ar, "flush timeout (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+}
+
+static void ar5523_free_tx_cmd(struct ar5523 *ar)
+{
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+	usb_free_coherent(ar->dev, AR5523_MAX_TXCMDSZ, cmd->buf_tx,
+ cmd->urb_tx->transfer_dma);
+ usb_free_urb(cmd->urb_tx);
+}
+
+static int ar5523_alloc_tx_cmd(struct ar5523 *ar)
+{
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+ cmd->ar = ar;
+ init_completion(&cmd->done);
+
+ cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL);
+ if (!cmd->urb_tx) {
+ ar5523_err(ar, "could not allocate urb\n");
+ return -ENOMEM;
+ }
+ cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ,
+ GFP_KERNEL,
+ &cmd->urb_tx->transfer_dma);
+ if (!cmd->buf_tx) {
+ usb_free_urb(cmd->urb_tx);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * This function is called periodically (every second) when associated to
+ * query device statistics.
+ */
+static void ar5523_stat_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work);
+ int error;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ mutex_lock(&ar->mutex);
+
+ /*
+ * Send request for statistics asynchronously once a second. This
+ * seems to be important. Throughput is a lot better if this is done.
+ */
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0);
+ if (error)
+ ar5523_err(ar, "could not query stats, error %d\n", error);
+ mutex_unlock(&ar->mutex);
+ ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ);
+}
+
+/*
+ * Interface routines to the mac80211 stack.
+ */
+static int ar5523_start(struct ieee80211_hw *hw)
+{
+ struct ar5523 *ar = hw->priv;
+ int error;
+ __be32 val;
+
+ ar5523_dbg(ar, "start called\n");
+
+ mutex_lock(&ar->mutex);
+ val = cpu_to_be32(0);
+ ar5523_cmd_write(ar, WDCMSG_BIND, &val, sizeof(val), 0);
+
+ /* set MAC address */
+ ar5523_config_multi(ar, CFG_MAC_ADDR, &ar->hw->wiphy->perm_addr,
+ ETH_ALEN);
+
+ /* XXX honor net80211 state */
+ ar5523_config(ar, CFG_RATE_CONTROL_ENABLE, 0x00000001);
+ ar5523_config(ar, CFG_DIVERSITY_CTL, 0x00000001);
+ ar5523_config(ar, CFG_ABOLT, 0x0000003f);
+ ar5523_config(ar, CFG_WME_ENABLED, 0x00000000);
+
+ ar5523_config(ar, CFG_SERVICE_TYPE, 1);
+ ar5523_config(ar, CFG_TP_SCALE, 0x00000000);
+ ar5523_config(ar, CFG_TPC_HALF_DBM5, 0x0000003c);
+ ar5523_config(ar, CFG_TPC_HALF_DBM2, 0x0000003c);
+ ar5523_config(ar, CFG_OVERRD_TX_POWER, 0x00000000);
+ ar5523_config(ar, CFG_GMODE_PROTECTION, 0x00000000);
+ ar5523_config(ar, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003);
+ ar5523_config(ar, CFG_PROTECTION_TYPE, 0x00000000);
+ ar5523_config(ar, CFG_MODE_CTS, 0x00000002);
+
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_START, NULL, 0,
+ &val, sizeof(val), AR5523_CMD_FLAG_MAGIC);
+ if (error) {
+ ar5523_dbg(ar, "could not start target, error %d\n", error);
+ goto err;
+ }
+ ar5523_dbg(ar, "WDCMSG_TARGET_START returns handle: 0x%x\n",
+ be32_to_cpu(val));
+
+ ar5523_switch_chan(ar);
+
+ val = cpu_to_be32(TARGET_DEVICE_AWAKE);
+ ar5523_cmd_write(ar, WDCMSG_SET_PWR_MODE, &val, sizeof(val), 0);
+ /* XXX? check */
+ ar5523_cmd_write(ar, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0);
+
+ set_bit(AR5523_HW_UP, &ar->flags);
+ queue_work(ar->wq, &ar->rx_refill_work);
+
+ /* enable Rx */
+ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+ ar5523_set_rxfilter(ar,
+ UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+ UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON,
+ UATH_FILTER_OP_SET);
+
+ ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_ON);
+ ar5523_dbg(ar, "start OK\n");
+
+err:
+ mutex_unlock(&ar->mutex);
+ return error;
+}
+
+static void ar5523_stop(struct ieee80211_hw *hw)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "stop called\n");
+
+ cancel_delayed_work_sync(&ar->stat_work);
+ mutex_lock(&ar->mutex);
+ clear_bit(AR5523_HW_UP, &ar->flags);
+
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+ ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_OFF);
+
+ ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0);
+
+ del_timer_sync(&ar->tx_wd_timer);
+ cancel_work_sync(&ar->tx_wd_work);
+ cancel_work_sync(&ar->rx_refill_work);
+ ar5523_cancel_rx_bufs(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ar5523 *ar = hw->priv;
+ int ret;
+
+ ar5523_dbg(ar, "set_rts_threshold called\n");
+ mutex_lock(&ar->mutex);
+
+ ret = ar5523_config(ar, CFG_USER_RTS_THRESHOLD, value);
+
+ mutex_unlock(&ar->mutex);
+ return ret;
+}
+
+static void ar5523_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "flush called\n");
+ ar5523_flush_tx(ar);
+}
+
+static int ar5523_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "add interface called\n");
+
+ if (ar->vif) {
+ ar5523_dbg(ar, "invalid add_interface\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ar->vif = vif;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void ar5523_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "remove interface called\n");
+ ar->vif = NULL;
+}
+
+static int ar5523_hwconfig(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "config called\n");
+ mutex_lock(&ar->mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ar5523_dbg(ar, "Do channel switch\n");
+ ar5523_flush_tx(ar);
+ ar5523_switch_chan(ar);
+ }
+ mutex_unlock(&ar->mutex);
+ return 0;
+}
+
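+/* Report 11g to the firmware if the peer advertises any OFDM rate,
+ * otherwise fall back to 11b. The result ends up in the connection
+ * attributes passed to WDCMSG_CREATE_CONNECTION.
+ */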
+static int ar5523_get_wlan_mode(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_supported_band *band;
+ int bit;
+ struct ieee80211_sta *sta;
+ u32 sta_rate_set;
+
+ band = ar->hw->wiphy->bands[ar->hw->conf.channel->band];
+ sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+ if (!sta) {
+ ar5523_info(ar, "STA not found!\n");
+ return WLAN_MODE_11b;
+ }
+ sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band];
+
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (sta_rate_set & 1) {
+ int rate = band->bitrates[bit].bitrate;
+ switch (rate) {
+ case 60:
+ case 90:
+ case 120:
+ case 180:
+ case 240:
+ case 360:
+ case 480:
+ case 540:
+ return WLAN_MODE_11g;
+ }
+ }
+ sta_rate_set >>= 1;
+ }
+ return WLAN_MODE_11b;
+}
+
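+/* Build a firmware rate set from the rates the associated STA supports.
+ * When "basic" is set, BSS basic rates are additionally marked with the
+ * 0x80 bit, mirroring the 802.11 Supported Rates encoding.
+ */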
+static void ar5523_create_rateset(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ar5523_cmd_rateset *rs,
+ bool basic)
+{
+ struct ieee80211_supported_band *band;
+ struct ieee80211_sta *sta;
+ int bit, i = 0;
+ u32 sta_rate_set, basic_rate_set;
+
+ sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+ basic_rate_set = bss_conf->basic_rates;
+ if (!sta) {
+ ar5523_info(ar, "STA not found. Cannot set rates\n");
+ sta_rate_set = bss_conf->basic_rates;
+	} else {
+		sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band];
+	}
+
+ ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);
+
+ band = ar->hw->wiphy->bands[ar->hw->conf.channel->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ BUG_ON(i >= AR5523_MAX_NRATES);
+ ar5523_dbg(ar, "Considering rate %d : %d\n",
+ band->bitrates[bit].hw_value, sta_rate_set & 1);
+ if (sta_rate_set & 1) {
+ rs->set[i] = band->bitrates[bit].hw_value;
+ if (basic_rate_set & 1 && basic)
+ rs->set[i] |= 0x80;
+ i++;
+ }
+ sta_rate_set >>= 1;
+ basic_rate_set >>= 1;
+ }
+
+ rs->length = i;
+}
+
+static int ar5523_set_basic_rates(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_rates rates;
+
+ memset(&rates, 0, sizeof(rates));
+ rates.connid = cpu_to_be32(2); /* XXX */
+ rates.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+ ar5523_create_rateset(ar, bss, &rates.rateset, true);
+
+ return ar5523_cmd_write(ar, WDCMSG_SET_BASIC_RATE, &rates,
+ sizeof(rates), 0);
+}
+
+static int ar5523_create_connection(struct ar5523 *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_create_connection create;
+ int wlan_mode;
+
+ memset(&create, 0, sizeof(create));
+ create.connid = cpu_to_be32(2);
+ create.bssid = cpu_to_be32(0);
+ /* XXX packed or not? */
+ create.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+
+ ar5523_create_rateset(ar, bss, &create.connattr.rateset, false);
+
+ wlan_mode = ar5523_get_wlan_mode(ar, bss);
+ create.connattr.wlanmode = cpu_to_be32(wlan_mode);
+
+ return ar5523_cmd_write(ar, WDCMSG_CREATE_CONNECTION, &create,
+ sizeof(create), 0);
+}
+
+static int ar5523_write_associd(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_set_associd associd;
+
+ memset(&associd, 0, sizeof(associd));
+ associd.defaultrateix = cpu_to_be32(0); /* XXX */
+ associd.associd = cpu_to_be32(bss->aid);
+ associd.timoffset = cpu_to_be32(0x3b); /* XXX */
+ memcpy(associd.bssid, bss->bssid, ETH_ALEN);
+ return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd,
+ sizeof(associd), 0);
+}
+
+static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss,
+ u32 changed)
+{
+ struct ar5523 *ar = hw->priv;
+ int error;
+
+ ar5523_dbg(ar, "bss_info_changed called\n");
+ mutex_lock(&ar->mutex);
+
+ if (!(changed & BSS_CHANGED_ASSOC))
+ goto out_unlock;
+
+ if (bss->assoc) {
+ error = ar5523_create_connection(ar, vif, bss);
+ if (error) {
+ ar5523_err(ar, "could not create connection\n");
+ goto out_unlock;
+ }
+
+ error = ar5523_set_basic_rates(ar, bss);
+ if (error) {
+ ar5523_err(ar, "could not set negotiated rate set\n");
+ goto out_unlock;
+ }
+
+ error = ar5523_write_associd(ar, bss);
+ if (error) {
+ ar5523_err(ar, "could not set association\n");
+ goto out_unlock;
+ }
+
+ /* turn link LED on */
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON);
+ set_bit(AR5523_CONNECTED, &ar->flags);
+ ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ);
+ } else {
+ cancel_delayed_work(&ar->stat_work);
+ clear_bit(AR5523_CONNECTED, &ar->flags);
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+ }
+
+out_unlock:
+ mutex_unlock(&ar->mutex);
+}
+
+#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_FCSFAIL | \
+ FIF_OTHER_BSS)
+
+static void ar5523_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ar5523 *ar = hw->priv;
+ u32 filter = 0;
+
+ ar5523_dbg(ar, "configure_filter called\n");
+ mutex_lock(&ar->mutex);
+ ar5523_flush_tx(ar);
+
+ *total_flags &= AR5523_SUPPORTED_FILTERS;
+
+	/* The filters seem strange. UATH_FILTER_RX_BCAST and
+	 * UATH_FILTER_RX_MCAST do not result in those frames being RXed.
+	 * The only way I have found to get [mb]cast frames seems to be
+	 * to set UATH_FILTER_RX_PROM.
+	 */
+ filter |= UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+ UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON |
+ UATH_FILTER_RX_PROM;
+
+ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+ ar5523_set_rxfilter(ar, filter, UATH_FILTER_OP_SET);
+
+ mutex_unlock(&ar->mutex);
+}
+
+static const struct ieee80211_ops ar5523_ops = {
+ .start = ar5523_start,
+ .stop = ar5523_stop,
+ .tx = ar5523_tx,
+ .set_rts_threshold = ar5523_set_rts_threshold,
+ .add_interface = ar5523_add_interface,
+ .remove_interface = ar5523_remove_interface,
+ .config = ar5523_hwconfig,
+ .bss_info_changed = ar5523_bss_info_changed,
+ .configure_filter = ar5523_configure_filter,
+ .flush = ar5523_flush,
+};
+
+static int ar5523_host_available(struct ar5523 *ar)
+{
+ struct ar5523_cmd_host_available setup;
+
+ /* inform target the host is available */
+ setup.sw_ver_major = cpu_to_be32(ATH_SW_VER_MAJOR);
+ setup.sw_ver_minor = cpu_to_be32(ATH_SW_VER_MINOR);
+ setup.sw_ver_patch = cpu_to_be32(ATH_SW_VER_PATCH);
+ setup.sw_ver_build = cpu_to_be32(ATH_SW_VER_BUILD);
+ return ar5523_cmd_read(ar, WDCMSG_HOST_AVAILABLE,
+ &setup, sizeof(setup), NULL, 0, 0);
+}
+
+static int ar5523_get_devstatus(struct ar5523 *ar)
+{
+ u8 macaddr[ETH_ALEN];
+ int error;
+
+ /* retrieve MAC address */
+ error = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN);
+ if (error) {
+ ar5523_err(ar, "could not read MAC address\n");
+ return error;
+ }
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, macaddr);
+
+ error = ar5523_get_status(ar, ST_SERIAL_NUMBER,
+ &ar->serial[0], sizeof(ar->serial));
+ if (error) {
+ ar5523_err(ar, "could not read device serial number\n");
+ return error;
+ }
+ return 0;
+}
+
+#define AR5523_SANE_RXBUFSZ 2000
+
+static int ar5523_get_max_rxsz(struct ar5523 *ar)
+{
+ int error;
+ __be32 rxsize;
+
+ /* Get max rx size */
+ error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize,
+ sizeof(rxsize));
+ if (error != 0) {
+ ar5523_err(ar, "could not read max RX size\n");
+ return error;
+ }
+
+ ar->rxbufsz = be32_to_cpu(rxsize);
+
+ if (!ar->rxbufsz || ar->rxbufsz > AR5523_SANE_RXBUFSZ) {
+ ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n",
+ AR5523_SANE_RXBUFSZ);
+ ar->rxbufsz = AR5523_SANE_RXBUFSZ;
+ }
+
+ ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz);
+ return 0;
+}
+
+/*
+ * This is copied from rtl818x, but we should probably move this
+ * to common code as in OpenBSD.
+ */
+static const struct ieee80211_rate ar5523_rates[] = {
+ { .bitrate = 10, .hw_value = 2, },
+	{ .bitrate = 20, .hw_value = 4, },
+ { .bitrate = 55, .hw_value = 11, },
+ { .bitrate = 110, .hw_value = 22, },
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+};
+
+static const struct ieee80211_channel ar5523_channels[] = {
+ { .center_freq = 2412 },
+ { .center_freq = 2417 },
+ { .center_freq = 2422 },
+ { .center_freq = 2427 },
+ { .center_freq = 2432 },
+ { .center_freq = 2437 },
+ { .center_freq = 2442 },
+ { .center_freq = 2447 },
+ { .center_freq = 2452 },
+ { .center_freq = 2457 },
+ { .center_freq = 2462 },
+ { .center_freq = 2467 },
+ { .center_freq = 2472 },
+ { .center_freq = 2484 },
+};
+
+static int ar5523_init_modes(struct ar5523 *ar)
+{
+ BUILD_BUG_ON(sizeof(ar->channels) != sizeof(ar5523_channels));
+ BUILD_BUG_ON(sizeof(ar->rates) != sizeof(ar5523_rates));
+
+ memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
+ memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
+
+ ar->band.band = IEEE80211_BAND_2GHZ;
+ ar->band.channels = ar->channels;
+ ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
+ ar->band.bitrates = ar->rates;
+ ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
+ ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+ return 0;
+}
+
+/*
+ * Load the MIPS R4000 microcode into the device. Once the image is loaded,
+ * the device will detach itself from the bus and reattach later with a new
+ * product Id (a la ezusb).
+ */
+static int ar5523_load_firmware(struct usb_device *dev)
+{
+ struct ar5523_fwblock *txblock, *rxblock;
+ const struct firmware *fw;
+ void *fwbuf;
+ int len, offset;
+ int foolen; /* XXX(hch): handle short transfers */
+ int error = -ENXIO;
+
+ if (request_firmware(&fw, AR5523_FIRMWARE_FILE, &dev->dev)) {
+ dev_err(&dev->dev, "no firmware found: %s\n",
+ AR5523_FIRMWARE_FILE);
+ return -ENOENT;
+ }
+
+ txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
+ if (!txblock)
+ goto out;
+
+ rxblock = kmalloc(sizeof(*rxblock), GFP_KERNEL);
+ if (!rxblock)
+ goto out_free_txblock;
+
+ fwbuf = kmalloc(AR5523_MAX_FWBLOCK_SIZE, GFP_KERNEL);
+ if (!fwbuf)
+ goto out_free_rxblock;
+
+ memset(txblock, 0, sizeof(struct ar5523_fwblock));
+ txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
+ txblock->total = cpu_to_be32(fw->size);
+
+ offset = 0;
+ len = fw->size;
+ while (len > 0) {
+ int mlen = min(len, AR5523_MAX_FWBLOCK_SIZE);
+
+ txblock->remain = cpu_to_be32(len - mlen);
+ txblock->len = cpu_to_be32(mlen);
+
+ /* send firmware block meta-data */
+ error = usb_bulk_msg(dev, ar5523_cmd_tx_pipe(dev),
+ txblock, sizeof(*txblock), &foolen,
+ AR5523_CMD_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not send firmware block info\n");
+ goto out_free_fwbuf;
+ }
+
+ /* send firmware block data */
+ memcpy(fwbuf, fw->data + offset, mlen);
+ error = usb_bulk_msg(dev, ar5523_data_tx_pipe(dev),
+ fwbuf, mlen, &foolen,
+ AR5523_DATA_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not send firmware block data\n");
+ goto out_free_fwbuf;
+ }
+
+ /* wait for ack from firmware */
+ error = usb_bulk_msg(dev, ar5523_cmd_rx_pipe(dev),
+ rxblock, sizeof(*rxblock), &foolen,
+ AR5523_CMD_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not read firmware answer\n");
+ goto out_free_fwbuf;
+ }
+
+ len -= mlen;
+ offset += mlen;
+ }
+
+ /*
+ * Set the error to -ENXIO to make sure we continue probing for
+ * a driver.
+ */
+ error = -ENXIO;
+
+ out_free_fwbuf:
+ kfree(fwbuf);
+ out_free_rxblock:
+ kfree(rxblock);
+ out_free_txblock:
+ kfree(txblock);
+ out:
+ release_firmware(fw);
+ return error;
+}
+
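+/* Probe: devices enumerating with the pre-firmware product id only get the
+ * firmware uploaded and will reattach under a different id (see the device
+ * table). Otherwise allocate the ieee80211_hw, set up the command and data
+ * buffers, query the target and register with mac80211.
+ */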
+static int ar5523_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct ieee80211_hw *hw;
+ struct ar5523 *ar;
+ int error = -ENOMEM;
+
+ /*
+ * Load firmware if the device requires it. This will return
+	 * -ENXIO on success and we'll get called back after the USB
+ * id changes to indicate that the firmware is present.
+ */
+ if (id->driver_info & AR5523_FLAG_PRE_FIRMWARE)
+ return ar5523_load_firmware(dev);
+
+ hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops);
+ if (!hw)
+ goto out;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->dev = dev;
+ mutex_init(&ar->mutex);
+
+ INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work);
+	setup_timer(&ar->tx_wd_timer, ar5523_tx_wd_timer, (unsigned long) ar);
+ INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work);
+ INIT_WORK(&ar->tx_work, ar5523_tx_work);
+ INIT_LIST_HEAD(&ar->tx_queue_pending);
+ INIT_LIST_HEAD(&ar->tx_queue_submitted);
+ spin_lock_init(&ar->tx_data_list_lock);
+ atomic_set(&ar->tx_nr_total, 0);
+ atomic_set(&ar->tx_nr_pending, 0);
+ init_waitqueue_head(&ar->tx_flush_waitq);
+
+ atomic_set(&ar->rx_data_free_cnt, 0);
+ INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work);
+ INIT_LIST_HEAD(&ar->rx_data_free);
+ INIT_LIST_HEAD(&ar->rx_data_used);
+ spin_lock_init(&ar->rx_data_list_lock);
+
+ ar->wq = create_singlethread_workqueue("ar5523");
+ if (!ar->wq) {
+ ar5523_err(ar, "Could not create wq\n");
+ goto out_free_ar;
+ }
+
+ error = ar5523_alloc_rx_bufs(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate rx buffers\n");
+ goto out_free_wq;
+ }
+
+ error = ar5523_alloc_rx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate rx command buffers\n");
+ goto out_free_rx_bufs;
+ }
+
+ error = ar5523_alloc_tx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate tx command buffers\n");
+ goto out_free_rx_cmd;
+ }
+
+ error = ar5523_submit_rx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Failed to submit rx cmd\n");
+ goto out_free_tx_cmd;
+ }
+
+ /*
+ * We're now ready to send/receive firmware commands.
+ */
+ error = ar5523_host_available(ar);
+ if (error) {
+ ar5523_err(ar, "could not initialize adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_max_rxsz(ar);
+ if (error) {
+ ar5523_err(ar, "could not get caps from adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_devcap(ar);
+ if (error) {
+ ar5523_err(ar, "could not get caps from adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_devstatus(ar);
+ if (error != 0) {
+ ar5523_err(ar, "could not get device status\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n",
+ (id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
+
+ ar->vif = NULL;
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL;
+ hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
+ sizeof(struct ar5523_chunk);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->queues = 1;
+
+ error = ar5523_init_modes(ar);
+ if (error)
+ goto out_cancel_rx_cmd;
+
+ usb_set_intfdata(intf, hw);
+
+ error = ieee80211_register_hw(hw);
+ if (error) {
+ ar5523_err(ar, "could not register device\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ ar5523_info(ar, "Found and initialized AR5523 device\n");
+ return 0;
+
+out_cancel_rx_cmd:
+ ar5523_cancel_rx_cmd(ar);
+out_free_tx_cmd:
+ ar5523_free_tx_cmd(ar);
+out_free_rx_cmd:
+ ar5523_free_rx_cmd(ar);
+out_free_rx_bufs:
+ ar5523_free_rx_bufs(ar);
+out_free_wq:
+ destroy_workqueue(ar->wq);
+out_free_ar:
+ ieee80211_free_hw(hw);
+out:
+ return error;
+}
+
+static void ar5523_disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "detaching\n");
+ set_bit(AR5523_USB_DISCONNECTED, &ar->flags);
+
+ ieee80211_unregister_hw(hw);
+
+ ar5523_cancel_rx_cmd(ar);
+ ar5523_free_tx_cmd(ar);
+ ar5523_free_rx_cmd(ar);
+ ar5523_free_rx_bufs(ar);
+
+ destroy_workqueue(ar->wq);
+
+ ieee80211_free_hw(hw);
+ usb_set_intfdata(intf, NULL);
+}
+
+#define AR5523_DEVICE_UG(vendor, device) \
+ { USB_DEVICE((vendor), (device)) }, \
+ { USB_DEVICE((vendor), (device) + 1), \
+ .driver_info = AR5523_FLAG_PRE_FIRMWARE }
+#define AR5523_DEVICE_UX(vendor, device) \
+ { USB_DEVICE((vendor), (device)), \
+ .driver_info = AR5523_FLAG_ABG }, \
+ { USB_DEVICE((vendor), (device) + 1), \
+ .driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE }
+
+static struct usb_device_id ar5523_id_table[] = {
+ AR5523_DEVICE_UG(0x168c, 0x0001), /* Atheros / AR5523 */
+ AR5523_DEVICE_UG(0x0cf3, 0x0001), /* Atheros2 / AR5523_1 */
+ AR5523_DEVICE_UG(0x0cf3, 0x0003), /* Atheros2 / AR5523_2 */
+ AR5523_DEVICE_UX(0x0cf3, 0x0005), /* Atheros2 / AR5523_3 */
+ AR5523_DEVICE_UG(0x0d8e, 0x7801), /* Conceptronic / AR5523_1 */
+ AR5523_DEVICE_UX(0x0d8e, 0x7811), /* Conceptronic / AR5523_2 */
+ AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
+ AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
+ AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
+ AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
+ AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
+ (CyberTAN Technology) */
+ AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
+ AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
+ AR5523_DEVICE_UG(0x0d8e, 0x7802), /* Globalsun / AR5523_3 */
+ AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
+ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
+ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
+ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
+ AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */
+ AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */
+ AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */
+ AR5523_DEVICE_UG(0x1385, 0x4250), /* Netgear3 / WG111T (2) */
+ AR5523_DEVICE_UG(0x1385, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x1385, 0x5f02), /* Netgear / WPN111 */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, ar5523_id_table);
+
+static struct usb_driver ar5523_driver = {
+ .name = "ar5523",
+ .id_table = ar5523_id_table,
+ .probe = ar5523_probe,
+ .disconnect = ar5523_disconnect,
+};
+
+static int __init ar5523_init(void)
+{
+ return usb_register(&ar5523_driver);
+}
+
+static void __exit ar5523_exit(void)
+{
+ usb_deregister(&ar5523_driver);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
+
+module_init(ar5523_init);
+module_exit(ar5523_exit);
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.h b/drivers/net/wireless/ath/ar5523/ar5523.h
new file mode 100644
index 0000000..00c6fd3
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define AR5523_FLAG_PRE_FIRMWARE (1 << 0)
+#define AR5523_FLAG_ABG (1 << 1)
+
+#define AR5523_FIRMWARE_FILE "ar5523.bin"
+
+#define AR5523_CMD_TX_PIPE 0x01
+#define AR5523_DATA_TX_PIPE 0x02
+#define AR5523_CMD_RX_PIPE 0x81
+#define AR5523_DATA_RX_PIPE 0x82
+
+#define ar5523_cmd_tx_pipe(dev) \
+ usb_sndbulkpipe((dev), AR5523_CMD_TX_PIPE)
+#define ar5523_data_tx_pipe(dev) \
+ usb_sndbulkpipe((dev), AR5523_DATA_TX_PIPE)
+#define ar5523_cmd_rx_pipe(dev) \
+ usb_rcvbulkpipe((dev), AR5523_CMD_RX_PIPE)
+#define ar5523_data_rx_pipe(dev) \
+ usb_rcvbulkpipe((dev), AR5523_DATA_RX_PIPE)
+
+#define AR5523_DATA_TIMEOUT 10000
+#define AR5523_CMD_TIMEOUT 1000
+
+#define AR5523_TX_DATA_COUNT 8
+#define AR5523_TX_DATA_RESTART_COUNT 2
+#define AR5523_RX_DATA_COUNT 16
+#define AR5523_RX_DATA_REFILL_COUNT 8
+
+#define AR5523_CMD_ID 1
+#define AR5523_DATA_ID 2
+
+#define AR5523_TX_WD_TIMEOUT (HZ * 2)
+#define AR5523_FLUSH_TIMEOUT (HZ * 3)
+
+enum AR5523_flags {
+ AR5523_HW_UP,
+ AR5523_USB_DISCONNECTED,
+ AR5523_CONNECTED
+};
+
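+/* State for the single outstanding firmware command; commands are issued
+ * one at a time, so a single URB and coherent buffer suffice.
+ */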
+struct ar5523_tx_cmd {
+ struct ar5523 *ar;
+ struct urb *urb_tx;
+ void *buf_tx;
+ void *odata;
+ int olen;
+ int flags;
+ int res;
+ struct completion done;
+};
+
+/* This struct is placed in tx_info->driver_data. It must not be larger
+ * than IEEE80211_TX_INFO_DRIVER_DATA_SIZE.
+ */
+struct ar5523_tx_data {
+ struct list_head list;
+ struct ar5523 *ar;
+ struct sk_buff *skb;
+ struct urb *urb;
+};
+
+struct ar5523_rx_data {
+ struct list_head list;
+ struct ar5523 *ar;
+ struct urb *urb;
+ struct sk_buff *skb;
+};
+
+struct ar5523 {
+ struct usb_device *dev;
+ struct ieee80211_hw *hw;
+
+ unsigned long flags;
+ struct mutex mutex;
+ struct workqueue_struct *wq;
+
+ struct ar5523_tx_cmd tx_cmd;
+
+ struct delayed_work stat_work;
+
+ struct timer_list tx_wd_timer;
+ struct work_struct tx_wd_work;
+ struct work_struct tx_work;
+ struct list_head tx_queue_pending;
+ struct list_head tx_queue_submitted;
+ spinlock_t tx_data_list_lock;
+ wait_queue_head_t tx_flush_waitq;
+
+ /* Queued + Submitted TX frames */
+ atomic_t tx_nr_total;
+
+ /* Submitted TX frames */
+ atomic_t tx_nr_pending;
+
+ void *rx_cmd_buf;
+ struct urb *rx_cmd_urb;
+
+ struct ar5523_rx_data rx_data[AR5523_RX_DATA_COUNT];
+ spinlock_t rx_data_list_lock;
+ struct list_head rx_data_free;
+ struct list_head rx_data_used;
+ atomic_t rx_data_free_cnt;
+
+ struct work_struct rx_refill_work;
+
+ unsigned int rxbufsz;
+ u8 serial[16];
+
+ struct ieee80211_channel channels[14];
+ struct ieee80211_rate rates[12];
+ struct ieee80211_supported_band band;
+ struct ieee80211_vif *vif;
+};
+
+/* flags for sending firmware commands */
+#define AR5523_CMD_FLAG_READ (1 << 1)
+#define AR5523_CMD_FLAG_MAGIC (1 << 2)
+
+#define ar5523_dbg(ar, format, arg...) \
+ dev_dbg(&(ar)->dev->dev, format, ## arg)
+
+/* On USB hot-unplug there can be a lot of URBs in flight and they'll all
+ * fail. Instead of dealing with them in every possible place just suppress
+ * any messages on USB disconnect.
+ */
+#define ar5523_err(ar, format, arg...) \
+do { \
+	if (!test_bit(AR5523_USB_DISCONNECTED, &(ar)->flags)) { \
+ dev_err(&(ar)->dev->dev, format, ## arg); \
+ } \
+} while (0)
+#define ar5523_info(ar, format, arg...) \
+ dev_info(&(ar)->dev->dev, format, ## arg)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523_hw.h b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
new file mode 100644
index 0000000..a0e8bf4
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* all fields are big endian */
+struct ar5523_fwblock {
+ __be32 flags;
+#define AR5523_WRITE_BLOCK (1 << 4)
+
+ __be32 len;
+#define AR5523_MAX_FWBLOCK_SIZE 2048
+
+ __be32 total;
+ __be32 remain;
+ __be32 rxtotal;
+ __be32 pad[123];
+} __packed;
+
+#define AR5523_MAX_RXCMDSZ 1024
+#define AR5523_MAX_TXCMDSZ 1024
+
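+/* Header carried at the start of every WDC command and reply. 'len' is the
+ * length of the whole (padded) transfer, 'code' one of the WDCMSG_* opcodes
+ * below; both are big endian on the wire.
+ */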
+struct ar5523_cmd_hdr {
+ __be32 len;
+ __be32 code;
+/* NB: these are defined for rev 1.5 firmware; rev 1.6 is different */
+/* messages from Host -> Target */
+#define WDCMSG_HOST_AVAILABLE 0x01
+#define WDCMSG_BIND 0x02
+#define WDCMSG_TARGET_RESET 0x03
+#define WDCMSG_TARGET_GET_CAPABILITY 0x04
+#define WDCMSG_TARGET_SET_CONFIG 0x05
+#define WDCMSG_TARGET_GET_STATUS 0x06
+#define WDCMSG_TARGET_GET_STATS 0x07
+#define WDCMSG_TARGET_START 0x08
+#define WDCMSG_TARGET_STOP 0x09
+#define WDCMSG_TARGET_ENABLE 0x0a
+#define WDCMSG_TARGET_DISABLE 0x0b
+#define WDCMSG_CREATE_CONNECTION 0x0c
+#define WDCMSG_UPDATE_CONNECT_ATTR 0x0d
+#define WDCMSG_DELETE_CONNECT 0x0e
+#define WDCMSG_SEND 0x0f
+#define WDCMSG_FLUSH 0x10
+/* messages from Target -> Host */
+#define WDCMSG_STATS_UPDATE 0x11
+#define WDCMSG_BMISS 0x12
+#define WDCMSG_DEVICE_AVAIL 0x13
+#define WDCMSG_SEND_COMPLETE 0x14
+#define WDCMSG_DATA_AVAIL 0x15
+#define WDCMSG_SET_PWR_MODE 0x16
+#define WDCMSG_BMISS_ACK 0x17
+#define WDCMSG_SET_LED_STEADY 0x18
+#define WDCMSG_SET_LED_BLINK 0x19
+/* more messages */
+#define WDCMSG_SETUP_BEACON_DESC 0x1a
+#define WDCMSG_BEACON_INIT 0x1b
+#define WDCMSG_RESET_KEY_CACHE 0x1c
+#define WDCMSG_RESET_KEY_CACHE_ENTRY 0x1d
+#define WDCMSG_SET_KEY_CACHE_ENTRY 0x1e
+#define WDCMSG_SET_DECOMP_MASK 0x1f
+#define WDCMSG_SET_REGULATORY_DOMAIN 0x20
+#define WDCMSG_SET_LED_STATE 0x21
+#define WDCMSG_WRITE_ASSOCID 0x22
+#define WDCMSG_SET_STA_BEACON_TIMERS 0x23
+#define WDCMSG_GET_TSF 0x24
+#define WDCMSG_RESET_TSF 0x25
+#define WDCMSG_SET_ADHOC_MODE 0x26
+#define WDCMSG_SET_BASIC_RATE 0x27
+#define WDCMSG_MIB_CONTROL 0x28
+#define WDCMSG_GET_CHANNEL_DATA 0x29
+#define WDCMSG_GET_CUR_RSSI 0x2a
+#define WDCMSG_SET_ANTENNA_SWITCH 0x2b
+#define WDCMSG_USE_SHORT_SLOT_TIME 0x2f
+#define WDCMSG_SET_POWER_MODE 0x30
+#define WDCMSG_SETUP_PSPOLL_DESC 0x31
+#define WDCMSG_SET_RX_MULTICAST_FILTER 0x32
+#define WDCMSG_RX_FILTER 0x33
+#define WDCMSG_PER_CALIBRATION 0x34
+#define WDCMSG_RESET 0x35
+#define WDCMSG_DISABLE 0x36
+#define WDCMSG_PHY_DISABLE 0x37
+#define WDCMSG_SET_TX_POWER_LIMIT 0x38
+#define WDCMSG_SET_TX_QUEUE_PARAMS 0x39
+#define WDCMSG_SETUP_TX_QUEUE 0x3a
+#define WDCMSG_RELEASE_TX_QUEUE 0x3b
+#define WDCMSG_SET_DEFAULT_KEY 0x43
+
+ __u32 priv; /* driver private data,
+				don't care about endianness */
+ __be32 magic;
+ __be32 reserved2[4];
+};
+
+struct ar5523_cmd_host_available {
+ __be32 sw_ver_major;
+ __be32 sw_ver_minor;
+ __be32 sw_ver_patch;
+ __be32 sw_ver_build;
+} __packed;
+
+#define ATH_SW_VER_MAJOR 1
+#define ATH_SW_VER_MINOR 5
+#define ATH_SW_VER_PATCH 0
+#define ATH_SW_VER_BUILD 9999
+
+struct ar5523_chunk {
+ u8 seqnum; /* sequence number for ordering */
+ u8 flags;
+#define UATH_CFLAGS_FINAL 0x01 /* final chunk of a msg */
+#define UATH_CFLAGS_RXMSG 0x02 /* chunk contains rx completion */
+#define UATH_CFLAGS_DEBUG 0x04 /* for debugging */
+ __be16 length; /* chunk size in bytes */
+ /* chunk data follows */
+} __packed;
+
+/*
+ * Message format for a WDCMSG_DATA_AVAIL message from Target to Host.
+ */
+struct ar5523_rx_desc {
+ __be32 len; /* msg length including header */
+ __be32 code; /* WDCMSG_DATA_AVAIL */
+ __be32 gennum; /* generation number */
+ __be32 status; /* start of RECEIVE_INFO */
+#define UATH_STATUS_OK 0
+#define UATH_STATUS_STOP_IN_PROGRESS 1
+#define UATH_STATUS_CRC_ERR 2
+#define UATH_STATUS_PHY_ERR 3
+#define UATH_STATUS_DECRYPT_CRC_ERR 4
+#define UATH_STATUS_DECRYPT_MIC_ERR 5
+#define UATH_STATUS_DECOMP_ERR 6
+#define UATH_STATUS_KEY_ERR 7
+#define UATH_STATUS_ERR 8
+ __be32 tstamp_low; /* low-order 32-bits of rx timestamp */
+ __be32 tstamp_high; /* high-order 32-bits of rx timestamp */
+ __be32 framelen; /* frame length */
+ __be32 rate; /* rx rate code */
+ __be32 antenna;
+ __be32 rssi;
+ __be32 channel;
+ __be32 phyerror;
+ __be32 connix; /* key table ix for bss traffic */
+ __be32 decrypterror;
+ __be32 keycachemiss;
+ __be32 pad; /* XXX? */
+} __packed;
+
+struct ar5523_tx_desc {
+ __be32 msglen;
+ __be32 msgid; /* msg id (supplied by host) */
+	__be32	type;		/* opcode: WDCMSG_SEND or WDCMSG_FLUSH */
+ __be32 txqid; /* tx queue id and flags */
+#define UATH_TXQID_MASK 0x0f
+#define UATH_TXQID_MINRATE 0x10 /* use min tx rate */
+#define UATH_TXQID_FF 0x20 /* content is fast frame */
+ __be32 connid; /* tx connection id */
+#define UATH_ID_INVALID 0xffffffff /* for sending prior to connection */
+ __be32 flags; /* non-zero if response desired */
+#define UATH_TX_NOTIFY (1 << 24) /* f/w will send a UATH_NOTIF_TX */
+ __be32 buflen; /* payload length */
+} __packed;
+
+#define AR5523_ID_BSS 2
+#define AR5523_ID_BROADCAST 0xffffffff
+
+/* structure for command UATH_CMD_WRITE_MAC */
+struct ar5523_write_mac {
+ __be32 reg;
+ __be32 len;
+ u8 data[32];
+} __packed;
+
+struct ar5523_cmd_rateset {
+ __u8 length;
+#define AR5523_MAX_NRATES 32
+ __u8 set[AR5523_MAX_NRATES];
+};
+
+struct ar5523_cmd_set_associd { /* AR5523_WRITE_ASSOCID */
+ __be32 defaultrateix;
+ __be32 associd;
+ __be32 timoffset;
+ __be32 turboprime;
+ __u8 bssid[6];
+} __packed;
+
+/* structure for command WDCMSG_RESET */
+struct ar5523_cmd_reset {
+ __be32 flags; /* channel flags */
+#define UATH_CHAN_TURBO 0x0100
+#define UATH_CHAN_CCK 0x0200
+#define UATH_CHAN_OFDM 0x0400
+#define UATH_CHAN_2GHZ 0x1000
+#define UATH_CHAN_5GHZ 0x2000
+ __be32 freq; /* channel frequency */
+ __be32 maxrdpower;
+ __be32 cfgctl;
+ __be32 twiceantennareduction;
+ __be32 channelchange;
+ __be32 keeprccontent;
+} __packed;
+
+/* structure for command WDCMSG_SET_BASIC_RATE */
+struct ar5523_cmd_rates {
+ __be32 connid;
+ __be32 keeprccontent;
+ __be32 size;
+ struct ar5523_cmd_rateset rateset;
+} __packed;
+
+enum {
+ WLAN_MODE_NONE = 0,
+ WLAN_MODE_11b,
+ WLAN_MODE_11a,
+ WLAN_MODE_11g,
+ WLAN_MODE_11a_TURBO,
+ WLAN_MODE_11g_TURBO,
+ WLAN_MODE_11a_TURBO_PRIME,
+ WLAN_MODE_11g_TURBO_PRIME,
+ WLAN_MODE_11a_XR,
+ WLAN_MODE_11g_XR,
+};
+
+struct ar5523_cmd_connection_attr {
+ __be32 longpreambleonly;
+ struct ar5523_cmd_rateset rateset;
+ __be32 wlanmode;
+} __packed;
+
+/* structure for command AR5523_CREATE_CONNECTION */
+struct ar5523_cmd_create_connection {
+ __be32 connid;
+ __be32 bssid;
+ __be32 size;
+ struct ar5523_cmd_connection_attr connattr;
+} __packed;
+
+struct ar5523_cmd_ledsteady { /* WDCMSG_SET_LED_STEADY */
+ __be32 lednum;
+#define UATH_LED_LINK 0
+#define UATH_LED_ACTIVITY 1
+ __be32 ledmode;
+#define UATH_LED_OFF 0
+#define UATH_LED_ON 1
+} __packed;
+
+struct ar5523_cmd_ledblink { /* WDCMSG_SET_LED_BLINK */
+ __be32 lednum;
+ __be32 ledmode;
+ __be32 blinkrate;
+ __be32 slowmode;
+} __packed;
+
+struct ar5523_cmd_ledstate { /* WDCMSG_SET_LED_STATE */
+ __be32 connected;
+} __packed;
+
+struct ar5523_cmd_txq_attr {
+ __be32 priority;
+ __be32 aifs;
+ __be32 logcwmin;
+ __be32 logcwmax;
+ __be32 bursttime;
+ __be32 mode;
+ __be32 qflags;
+} __packed;
+
+struct ar5523_cmd_txq_setup { /* WDCMSG_SETUP_TX_QUEUE */
+ __be32 qid;
+ __be32 len;
+ struct ar5523_cmd_txq_attr attr;
+} __packed;
+
+struct ar5523_cmd_rx_filter { /* WDCMSG_RX_FILTER */
+ __be32 bits;
+#define UATH_FILTER_RX_UCAST 0x00000001
+#define UATH_FILTER_RX_MCAST 0x00000002
+#define UATH_FILTER_RX_BCAST 0x00000004
+#define UATH_FILTER_RX_CONTROL 0x00000008
+#define UATH_FILTER_RX_BEACON 0x00000010 /* beacon frames */
+#define UATH_FILTER_RX_PROM 0x00000020 /* promiscuous mode */
+#define UATH_FILTER_RX_PHY_ERR 0x00000040 /* phy errors */
+#define UATH_FILTER_RX_PHY_RADAR 0x00000080 /* radar phy errors */
+#define UATH_FILTER_RX_XR_POOL 0x00000400 /* XR group polls */
+#define UATH_FILTER_RX_PROBE_REQ 0x00000800
+ __be32 op;
+#define UATH_FILTER_OP_INIT 0x0
+#define UATH_FILTER_OP_SET 0x1
+#define UATH_FILTER_OP_CLEAR 0x2
+#define UATH_FILTER_OP_TEMP 0x3
+#define UATH_FILTER_OP_RESTORE 0x4
+} __packed;
+
+enum {
+	CFG_NONE,		/* Sentinel to indicate "no config" */
+ CFG_REG_DOMAIN, /* Regulatory Domain */
+ CFG_RATE_CONTROL_ENABLE,
+ CFG_DEF_XMIT_DATA_RATE, /* NB: if rate control is not enabled */
+ CFG_HW_TX_RETRIES,
+ CFG_SW_TX_RETRIES,
+ CFG_SLOW_CLOCK_ENABLE,
+ CFG_COMP_PROC,
+ CFG_USER_RTS_THRESHOLD,
+ CFG_XR2NORM_RATE_THRESHOLD,
+ CFG_XRMODE_SWITCH_COUNT,
+ CFG_PROTECTION_TYPE,
+ CFG_BURST_SEQ_THRESHOLD,
+ CFG_ABOLT,
+ CFG_IQ_LOG_COUNT_MAX,
+ CFG_MODE_CTS,
+ CFG_WME_ENABLED,
+ CFG_GPRS_CBR_PERIOD,
+ CFG_SERVICE_TYPE,
+ /* MAC Address to use. Overrides EEPROM */
+ CFG_MAC_ADDR,
+ CFG_DEBUG_EAR,
+ CFG_INIT_REGS,
+ /* An ID for use in error & debug messages */
+ CFG_DEBUG_ID,
+ CFG_COMP_WIN_SZ,
+ CFG_DIVERSITY_CTL,
+ CFG_TP_SCALE,
+ CFG_TPC_HALF_DBM5,
+ CFG_TPC_HALF_DBM2,
+ CFG_OVERRD_TX_POWER,
+ CFG_USE_32KHZ_CLOCK,
+ CFG_GMODE_PROTECTION,
+ CFG_GMODE_PROTECT_RATE_INDEX,
+ CFG_GMODE_NON_ERP_PREAMBLE,
+ CFG_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+	/* Sentinel to indicate "no capability" */
+ CAP_NONE,
+ CAP_ALL, /* ALL capabilities */
+ CAP_TARGET_VERSION,
+ CAP_TARGET_REVISION,
+ CAP_MAC_VERSION,
+ CAP_MAC_REVISION,
+ CAP_PHY_REVISION,
+ CAP_ANALOG_5GHz_REVISION,
+ CAP_ANALOG_2GHz_REVISION,
+ /* Target supports WDC message debug features */
+ CAP_DEBUG_WDCMSG_SUPPORT,
+
+ CAP_REG_DOMAIN,
+ CAP_COUNTRY_CODE,
+ CAP_REG_CAP_BITS,
+
+ CAP_WIRELESS_MODES,
+ CAP_CHAN_SPREAD_SUPPORT,
+ CAP_SLEEP_AFTER_BEACON_BROKEN,
+ CAP_COMPRESS_SUPPORT,
+ CAP_BURST_SUPPORT,
+ CAP_FAST_FRAMES_SUPPORT,
+ CAP_CHAP_TUNING_SUPPORT,
+ CAP_TURBOG_SUPPORT,
+ CAP_TURBO_PRIME_SUPPORT,
+ CAP_DEVICE_TYPE,
+ CAP_XR_SUPPORT,
+ CAP_WME_SUPPORT,
+ CAP_TOTAL_QUEUES,
+ CAP_CONNECTION_ID_MAX, /* Should absorb CAP_KEY_CACHE_SIZE */
+
+ CAP_LOW_5GHZ_CHAN,
+ CAP_HIGH_5GHZ_CHAN,
+ CAP_LOW_2GHZ_CHAN,
+ CAP_HIGH_2GHZ_CHAN,
+
+ CAP_MIC_AES_CCM,
+ CAP_MIC_CKIP,
+ CAP_MIC_TKIP,
+ CAP_MIC_TKIP_WME,
+ CAP_CIPHER_AES_CCM,
+ CAP_CIPHER_CKIP,
+ CAP_CIPHER_TKIP,
+
+ CAP_TWICE_ANTENNAGAIN_5G,
+ CAP_TWICE_ANTENNAGAIN_2G,
+};
+
+enum {
+ ST_NONE, /* Sentinel to indicate "no status" */
+ ST_ALL,
+ ST_SERVICE_TYPE,
+ ST_WLAN_MODE,
+ ST_FREQ,
+ ST_BAND,
+ ST_LAST_RSSI,
+ ST_PS_FRAMES_DROPPED,
+ ST_CACHED_DEF_ANT,
+ ST_COUNT_OTHER_RX_ANT,
+ ST_USE_FAST_DIVERSITY,
+ ST_MAC_ADDR,
+ ST_RX_GENERATION_NUM,
+ ST_TX_QUEUE_DEPTH,
+ ST_SERIAL_NUMBER,
+ ST_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+ TARGET_DEVICE_AWAKE,
+ TARGET_DEVICE_SLEEP,
+ TARGET_DEVICE_PWRDN,
+ TARGET_DEVICE_PWRSAVE,
+ TARGET_DEVICE_SUSPEND,
+ TARGET_DEVICE_RESUME,
+};
+
+/* this is in net/ieee80211.h, but that conflicts with the mac80211 headers */
+#define IEEE80211_2ADDR_LEN 16
+
+#define AR5523_MIN_RXBUFSZ \
+ (((sizeof(__be32) + IEEE80211_2ADDR_LEN + \
+ sizeof(struct ar5523_rx_desc)) + 3) & ~3)
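Note on AR5523_MIN_RXBUFSZ above: the trailing "(x + 3) & ~3" step is the usual round-up-to-a-multiple-of-4 idiom, so the minimum RX buffer holds the 32-bit transfer length, the 2-address header and the RX descriptor with word alignment. A minimal stand-alone illustration of that idiom (not part of the patch):

    /* Rounds a byte count up to the next multiple of 4,
     * exactly what AR5523_MIN_RXBUFSZ does inline.
     */
    static inline unsigned int roundup4(unsigned int x)
    {
            return (x + 3) & ~3u;   /* 13 -> 16, 16 -> 16, 17 -> 20 */
    }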
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7089f81..2770899 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -301,7 +301,7 @@ static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
static bool ath6kl_is_wpa_ie(const u8 *pos)
{
- return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
+ return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
pos[2] == 0x00 && pos[3] == 0x50 &&
pos[4] == 0xf2 && pos[5] == 0x01;
}
@@ -3651,7 +3651,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
ar->fw_capabilities))
- ar->wiphy->features = NL80211_FEATURE_INACTIVITY_TIMER;
+ ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
ar->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 84b558d..162401f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -276,6 +276,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
offset_array[i],
REG_READ(ah, offset_array[i]));
+ if (AR_SREV_9565(ah) &&
+ (iCoff == 63 || qCoff == 63 ||
+ iCoff == -63 || qCoff == -63))
+ return;
+
REG_RMW_FIELD(ah, offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
iCoff);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 5bbe505..c86cb640 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -18,6 +18,7 @@
#include "hw.h"
#include "ar9003_phy.h"
#include "ar9003_eeprom.h"
+#include "ar9003_mci.h"
#define COMP_HDR_LEN 4
#define COMP_CKSUM_LEN 2
@@ -41,7 +42,6 @@
static int ar9003_hw_power_interpolate(int32_t x,
int32_t *px, int32_t *py, u_int16_t np);
-
static const struct ar9300_eeprom ar9300_default = {
.eepromVersion = 2,
.templateVersion = 2,
@@ -2989,7 +2989,7 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_PAPRD:
if (AR_SREV_9462(ah))
return false;
- if (!ah->config.enable_paprd);
+ if (!ah->config.enable_paprd)
return false;
return !!(pBase->featureEnable & BIT(5));
case EEP_CHAIN_MASK_REDUCE:
@@ -3601,7 +3601,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
* 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE
* SWITCH_TABLE_COM_SPDT_WLAN_IDLE
*/
- if (AR_SREV_9462_20_OR_LATER(ah)) {
+ if (AR_SREV_9462_20(ah) || AR_SREV_9565(ah)) {
value = ar9003_switch_com_spdt_get(ah, is2ghz);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
AR_SWITCH_TABLE_COM_SPDT_ALL, value);
@@ -5037,16 +5037,28 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
case CTL_5GHT20:
case CTL_2GHT20:
for (i = ALL_TARGET_HT20_0_8_16;
- i <= ALL_TARGET_HT20_23; i++)
+ i <= ALL_TARGET_HT20_23; i++) {
pPwrArray[i] = (u8)min((u16)pPwrArray[i],
minCtlPower);
+ if (ath9k_hw_mci_is_enabled(ah))
+ pPwrArray[i] =
+ (u8)min((u16)pPwrArray[i],
+ ar9003_mci_get_max_txpower(ah,
+ pCtlMode[ctlMode]));
+ }
break;
case CTL_5GHT40:
case CTL_2GHT40:
for (i = ALL_TARGET_HT40_0_8_16;
- i <= ALL_TARGET_HT40_23; i++)
+ i <= ALL_TARGET_HT40_23; i++) {
pPwrArray[i] = (u8)min((u16)pPwrArray[i],
minCtlPower);
+ if (ath9k_hw_mci_is_enabled(ah))
+ pPwrArray[i] =
+ (u8)min((u16)pPwrArray[i],
+ ar9003_mci_get_max_txpower(ah,
+ pCtlMode[ctlMode]));
+ }
break;
default:
break;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 1a36fa2..0693cd9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -219,10 +219,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Awake -> Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
+ ar9462_pciephy_clkreq_disable_L1_2p0);
/* Sleep -> Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
+ ar9462_pciephy_clkreq_disable_L1_2p0);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -328,9 +328,9 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+ ar9565_1p0_pciephy_clkreq_disable_L1);
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+ ar9565_1p0_pciephy_clkreq_disable_L1);
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 44c202c..42b4412 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -750,6 +750,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
mci_hw->bt_state = MCI_BT_AWAKE;
+ REG_CLR_BIT(ah, AR_PHY_TIMING4,
+ 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
+
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
@@ -759,6 +762,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
+ REG_SET_BIT(ah, AR_PHY_TIMING4,
+ 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
+
exit:
ar9003_mci_enable_interrupt(ah);
return 0;
@@ -799,6 +805,9 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+ if (AR_SREV_9565(ah))
+ REG_RMW_FIELD(ah, AR_MCI_MISC, AR_MCI_MISC_HW_FIX_EN, 1);
+
if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
@@ -818,7 +827,7 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 regval;
+ u32 regval, i;
ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
is_full_sleep, is_2g);
@@ -847,11 +856,18 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
- SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
- SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ if (AR_SREV_9565(ah)) {
+ regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+ } else {
+ regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+ }
REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
@@ -865,9 +881,24 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 0);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+ /* Set the timeout to 3.125 ms (5 BT slots) */
+ REG_RMW_FIELD(ah, AR_BTCOEX_WL_LNA, AR_BTCOEX_WL_LNA_TIMEOUT, 0x3D090);
+
+ /* concurrent tx priority */
+ if (mci->config & ATH_MCI_CONFIG_CONCUR_TX) {
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE, 0);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TXPWR_THRESH, 0x7f);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_REDUCE_TXPWR, 0);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_BTCOEX_MAX_TXPWR(i), 0x7f7f7f7f);
+ }
+
regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval);
REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
@@ -910,6 +941,9 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
mci->ready = true;
ar9003_mci_prep_interface(ah);
+ if (AR_SREV_9565(ah))
+ REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+ AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
if (en_int)
ar9003_mci_enable_interrupt(ah);
@@ -1028,7 +1062,9 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
ar9003_mci_osla_setup(ah, true);
- REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
+
+ if (AR_SREV_9462(ah))
+ REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
} else {
ar9003_mci_send_lna_take(ah, true);
udelay(5);
@@ -1170,7 +1206,7 @@ EXPORT_SYMBOL(ar9003_mci_cleanup);
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 value = 0;
+ u32 value = 0, tsf;
u8 query_type;
switch (state_type) {
@@ -1228,6 +1264,14 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
break;
case MCI_STATE_RECOVER_RX:
+ tsf = ath9k_hw_gettsf32(ah);
+ if ((tsf - mci->last_recovery) <= MCI_RECOVERY_DUR_TSF) {
+ ath_dbg(ath9k_hw_common(ah), MCI,
+ "(MCI) ignore Rx recovery\n");
+ break;
+ }
+ ath_dbg(ath9k_hw_common(ah), MCI, "(MCI) RECOVER RX\n");
+ mci->last_recovery = tsf;
ar9003_mci_prep_interface(ah);
mci->query_bt = true;
mci->need_flush_btinfo = true;
@@ -1426,3 +1470,17 @@ void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
ar9003_mci_send_coex_wlan_channels(ah, true);
}
EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
+
+u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
+{
+ if (!ah->btcoex_hw.mci.concur_tx)
+ goto out;
+
+ if (ctlmode == CTL_2GHT20)
+ return ATH_BTCOEX_HT20_MAX_TXPOWER;
+ else if (ctlmode == CTL_2GHT40)
+ return ATH_BTCOEX_HT40_MAX_TXPOWER;
+
+out:
+ return -1;
+}
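Because ar9003_mci_get_max_txpower() returns a u16, the "return -1" in the fall-through path becomes 0xffff, a value the min() in ar9003_hw_set_power_per_rate_table() can never select; the net effect is "no extra cap" whenever concurrent tx is off or the CTL mode is not 2G HT20/HT40. A small stand-alone sketch of that behaviour (names are illustrative, not driver API):

    #include <stdint.h>

    /* Mirrors the min() the eeprom code applies against the MCI cap. */
    static uint16_t cap_txpower(uint16_t power, uint16_t mci_cap)
    {
            return power < mci_cap ? power : mci_cap;
    }

    /* cap_txpower(40, (uint16_t)-1) == 40   -> -1 means "no cap"
     * cap_txpower(40, 0x14)        == 0x14  -> HT20 concurrent-tx limit applies
     */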
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 2a2d018..66d7ab9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -18,6 +18,7 @@
#define AR9003_MCI_H
#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
+#define MCI_RECOVERY_DUR_TSF (100 * 1000) /* 100 ms */
/* Default remote BT device MCI COEX version */
#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
@@ -125,6 +126,7 @@ enum ath_mci_gpm_coex_profile_type {
MCI_GPM_COEX_PROFILE_HID,
MCI_GPM_COEX_PROFILE_BNEP,
MCI_GPM_COEX_PROFILE_VOICE,
+ MCI_GPM_COEX_PROFILE_A2DPVO,
MCI_GPM_COEX_PROFILE_MAX
};
@@ -196,7 +198,6 @@ enum mci_state_type {
MCI_STATE_SEND_WLAN_COEX_VERSION,
MCI_STATE_SEND_VERSION_QUERY,
MCI_STATE_SEND_STATUS_QUERY,
- MCI_STATE_SET_CONCUR_TX_PRI,
MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP,
MCI_STATE_DEBUG,
@@ -278,6 +279,7 @@ void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
void ar9003_mci_set_power_awake(struct ath_hw *ah);
void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
+u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode);
#else
@@ -324,6 +326,10 @@ static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
{
}
+static inline u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
+{
+ return -1;
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 9a48e3d..8f58523 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -32,6 +32,7 @@
#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
+#define AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT 16
#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 843e79f..0c2ac0c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -768,9 +768,9 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
-static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = {
+static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
- {0x00018c00, 0x18212ede},
+ {0x00018c00, 0x18213ede},
{0x00018c04, 0x000801d8},
{0x00018c08, 0x0003780c},
};
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index dfe6a47..4e125d8 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -437,6 +437,7 @@ void ath9k_set_beacon(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+#define ATH_ANI_MAX_SKIP_COUNT 10
#define ATH_PAPRD_TIMEOUT 100 /* msecs */
#define ATH_PLL_WORK_INTERVAL 100
@@ -478,6 +479,7 @@ struct ath_btcoex {
u32 btscan_no_stomp; /* in usec */
u32 duty_cycle;
u32 bt_wait_time;
+ int rssi_count;
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
struct ath_mci_profile mci;
};
@@ -492,6 +494,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
+int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 len, u32 size);
#else
static inline int ath9k_init_btcoex(struct ath_softc *sc)
{
@@ -518,6 +521,11 @@ static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
{
}
+static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf,
+ u32 len, u32 size)
+{
+ return 0;
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
struct ath9k_wow_pattern {
@@ -642,6 +650,7 @@ enum sc_op_flags {
#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
#define PS_WAIT_FOR_TX_ACK BIT(3)
#define PS_BEACON_SYNC BIT(4)
+#define PS_WAIT_FOR_ANI BIT(5)
struct ath_rate_table;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 419e9a3..c90e9bc 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -195,7 +195,7 @@ void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
ah->btcoex_hw.mci.need_flush_btinfo = false;
ah->btcoex_hw.mci.wlan_cal_seq = 0;
ah->btcoex_hw.mci.wlan_cal_done = 0;
- ah->btcoex_hw.mci.config = 0x2201;
+ ah->btcoex_hw.mci.config = (AR_SREV_9462(ah)) ? 0x2201 : 0xa4c1;
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_mci);
@@ -218,27 +218,45 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u8 txprio_shift[] = { 24, 16, 16, 0 }; /* tx priority weight */
+ bool concur_tx = (mci_hw->concur_tx && btcoex_hw->tx_prio[stomp_type]);
+ const u32 *weight = ar9003_wlan_weights[stomp_type];
+ int i;
- if (AR_SREV_9300_20_OR_LATER(ah)) {
- const u32 *weight = ar9003_wlan_weights[stomp_type];
- int i;
-
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
- btcoex_hw->mci.stomp_ftp)
- stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
- weight = mci_wlan_weights[stomp_type];
- }
-
- for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
- btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
- btcoex_hw->wlan_weight[i] = weight[i];
- }
- } else {
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->bt_coex_weights =
SM(bt_weight, AR_BTCOEX_BT_WGHT) |
SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+ return;
+ }
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ enum ath_stomp_type stype =
+ ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+ btcoex_hw->mci.stomp_ftp) ?
+ ATH_BTCOEX_STOMP_LOW_FTP : stomp_type;
+ weight = mci_wlan_weights[stype];
}
+
+ for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+ btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
+ btcoex_hw->wlan_weight[i] = weight[i];
+ if (concur_tx && i) {
+ btcoex_hw->wlan_weight[i] &=
+ ~(0xff << txprio_shift[i-1]);
+ btcoex_hw->wlan_weight[i] |=
+ (btcoex_hw->tx_prio[stomp_type] <<
+ txprio_shift[i-1]);
+ }
+ }
+ /* Last WLAN weight has to be adjusted wrt tx priority */
+ if (concur_tx) {
+ btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
+ btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
+ << txprio_shift[i-1]);
+ }
+
}
EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
@@ -385,3 +403,13 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
}
}
EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
+
+void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ int i;
+
+ for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
+ btcoex->tx_prio[i] = stomp_txprio[i];
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_set_concur_txprio);
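The tx_prio handling in ath9k_hw_btcoex_set_weight() splices one priority byte per stomp type into the 32-bit WLAN weight words, at the byte positions listed in txprio_shift[]. A minimal sketch of that byte splice (stand-alone, not the driver code itself):

    #include <stdint.h>

    /* Clear the old priority byte at 'shift' and insert the new one,
     * as done for the WLAN weight words when concurrent tx is active.
     */
    static uint32_t splice_txprio(uint32_t weight, uint8_t prio,
                                  unsigned int shift)
    {
            weight &= ~(0xffu << shift);
            weight |= (uint32_t)prio << shift;
            return weight;
    }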
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 385197a..2f84ab2 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -39,6 +39,9 @@
#define ATH_BTCOEX_RX_WAIT_TIME 100
#define ATH_BTCOEX_STOMP_FTP_THRESH 5
+#define ATH_BTCOEX_HT20_MAX_TXPOWER 0x14
+#define ATH_BTCOEX_HT40_MAX_TXPOWER 0x10
+
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */
@@ -84,6 +87,8 @@ struct ath9k_hw_mci {
u8 bt_ver_minor;
u8 bt_state;
u8 stomp_ftp;
+ bool concur_tx;
+ u32 last_recovery;
};
struct ath_btcoex_hw {
@@ -98,6 +103,7 @@ struct ath_btcoex_hw {
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
+ u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
};
void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
@@ -112,5 +118,6 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
void ath9k_hw_btcoex_disable(struct ath_hw *ah);
void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type);
+void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index e5cceb0..f3448a0 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -410,6 +410,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
ah->caldata->channel = chan->channel;
ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
+ ah->caldata->chanmode = chan->chanmode;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6727b56..a8be94b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1586,6 +1586,35 @@ static const struct file_operations fops_samps = {
#endif
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ u32 len = 0, size = 1500;
+ char *buf;
+ size_t retval;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = ath9k_dump_btcoex(sc, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_btcoex = {
+ .read = read_file_btcoex,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+#endif
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1658,6 +1687,9 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_ant_diversity);
-
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_btcoex);
+#endif
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index d9ed141..a8ea57b 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -187,6 +187,24 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
}
}
+static void ath_mci_ftp_adjust(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
+ if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
+ (mci->num_pan || mci->num_other_acl))
+ ah->btcoex_hw.mci.stomp_ftp =
+ (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
+ else
+ ah->btcoex_hw.mci.stomp_ftp = false;
+ btcoex->bt_wait_time = 0;
+ sc->rx.num_pkts = 0;
+ }
+}
+
/*
* This is the master bt coex timer which runs every
* 45 ms; bt traffic will be given priority during 55% of this
@@ -197,41 +215,43 @@ static void ath_btcoex_period_timer(unsigned long data)
struct ath_softc *sc = (struct ath_softc *) data;
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_mci_profile *mci = &btcoex->mci;
+ enum ath_stomp_type stomp_type;
u32 timer_period;
- bool is_btscan;
unsigned long flags;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
+ btcoex->bt_wait_time += btcoex->btcoex_period;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
goto skip_hw_wakeup;
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ ath9k_mci_update_rssi(sc);
+
ath9k_ps_wakeup(sc);
+
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
ath_detect_bt_priority(sc);
- is_btscan = test_bit(BT_OP_SCAN, &btcoex->op_flags);
- btcoex->bt_wait_time += btcoex->btcoex_period;
- if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
- if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
- (mci->num_pan || mci->num_other_acl))
- ah->btcoex_hw.mci.stomp_ftp =
- (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
- else
- ah->btcoex_hw.mci.stomp_ftp = false;
- btcoex->bt_wait_time = 0;
- sc->rx.num_pkts = 0;
- }
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ath_mci_ftp_adjust(sc);
spin_lock_bh(&btcoex->btcoex_lock);
- ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
- btcoex->bt_stomp_type);
+ stomp_type = btcoex->bt_stomp_type;
+ timer_period = btcoex->btcoex_no_stomp;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) {
+ if (test_bit(BT_OP_SCAN, &btcoex->op_flags)) {
+ stomp_type = ATH_BTCOEX_STOMP_ALL;
+ timer_period = btcoex->btscan_no_stomp;
+ }
+ }
+ ath9k_hw_btcoex_bt_stomp(ah, stomp_type);
ath9k_hw_btcoex_enable(ah);
+
spin_unlock_bh(&btcoex->btcoex_lock);
/*
@@ -243,17 +263,16 @@ static void ath_btcoex_period_timer(unsigned long data)
if (btcoex->hw_timer_enabled)
ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
- timer_period = is_btscan ? btcoex->btscan_no_stomp :
- btcoex->btcoex_no_stomp;
ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
timer_period * 10);
btcoex->hw_timer_enabled = true;
}
ath9k_ps_restore(sc);
+
skip_hw_wakeup:
- timer_period = btcoex->btcoex_period;
- mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
+ mod_timer(&btcoex->period_timer,
+ jiffies + msecs_to_jiffies(btcoex->btcoex_period));
}
/*
@@ -273,7 +292,8 @@ static void ath_btcoex_no_stomp_timer(void *arg)
spin_lock_bh(&btcoex->btcoex_lock);
if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
- test_bit(BT_OP_SCAN, &btcoex->op_flags))
+ (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI) &&
+ test_bit(BT_OP_SCAN, &btcoex->op_flags)))
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
@@ -474,4 +494,52 @@ int ath9k_init_btcoex(struct ath_softc *sc)
return 0;
}
+int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 len, u32 size)
+{
+#define ATH_DUMP_BTCOEX(_s, _val) \
+ do { \
+ len += snprintf(buf + len, size - len, \
+ "%20s : %10d\n", _s, (_val)); \
+ } while (0)
+
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ int i;
+
+ ATH_DUMP_BTCOEX("Total BT profiles", NUM_PROF(mci));
+ ATH_DUMP_BTCOEX("Number of MGMT", mci->num_mgmt);
+ ATH_DUMP_BTCOEX("Number of SCO", mci->num_sco);
+ ATH_DUMP_BTCOEX("Number of A2DP", mci->num_a2dp);
+ ATH_DUMP_BTCOEX("Number of HID", mci->num_hid);
+ ATH_DUMP_BTCOEX("Number of PAN", mci->num_pan);
+ ATH_DUMP_BTCOEX("Number of ACL", mci->num_other_acl);
+ ATH_DUMP_BTCOEX("Number of BDR", mci->num_bdr);
+ ATH_DUMP_BTCOEX("Aggr. Limit", mci->aggr_limit);
+ ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
+ ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
+ ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
+ ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
+ ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
+ ATH_DUMP_BTCOEX("Concur RSSI count", btcoex->rssi_count);
+ len += snprintf(buf + len, size - len, "BT Weights: ");
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ len += snprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->bt_weight[i]);
+ len += snprintf(buf + len, size - len, "\n");
+ len += snprintf(buf + len, size - len, "WLAN Weights: ");
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ len += snprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->wlan_weight[i]);
+ len += snprintf(buf + len, size - len, "\n");
+ len += snprintf(buf + len, size - len, "Tx Priorities: ");
+ for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
+ len += snprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->tx_prio[i]);
+ len += snprintf(buf + len, size - len, "\n");
+#undef ATH_DUMP_BTCOEX
+
+ return len;
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
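ath9k_dump_btcoex() above uses the common "len += snprintf(buf + len, size - len, ...)" accumulation idiom. Since snprintf() returns the length that would have been written, len can exceed size on truncation, so the buffer (1500 bytes here) has to stay comfortably larger than the expected output; scnprintf() is the safer choice when truncation is possible. A condensed, user-space-compilable sketch of the idiom (assumed helper name):

    #include <stdio.h>

    /* Append one formatted key/value line at buf+len, bounded by the
     * remaining space, and return the updated offset. Note that on
     * truncation snprintf() still reports the would-be length.
     */
    static int dump_kv(char *buf, int len, int size, const char *key, int val)
    {
            len += snprintf(buf + len, size - len, "%20s : %10d\n", key, val);
            return len;
    }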
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d98255e..5ecf128 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -694,6 +694,20 @@ err_hw:
return ret;
}
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) },
+ { .max = 2, .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2,
+ .num_different_channels = 1,
+};
+
static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw)
{
@@ -716,6 +730,9 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT);
+ hw->wiphy->iface_combinations = &if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index ca78e33..66f6a74 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1036,26 +1036,6 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
- mutex_unlock(&priv->mutex);
- return -ENOBUFS;
- }
-
- if (priv->num_ibss_vif ||
- (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
- ath_err(common, "IBSS coexistence with other modes is not allowed\n");
- mutex_unlock(&priv->mutex);
- return -ENOBUFS;
- }
-
- if (((vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_ADHOC)) &&
- ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
- ath_err(common, "Max. number of beaconing interfaces reached\n");
- mutex_unlock(&priv->mutex);
- return -ENOBUFS;
- }
-
ath9k_htc_ps_wakeup(priv);
memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8e1559a..71cd9f0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2153,9 +2153,6 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
AR_RTC_FORCE_WAKE_EN);
udelay(50);
- if (ath9k_hw_mci_is_enabled(ah))
- ar9003_mci_set_power_awake(ah);
-
for (i = POWER_UP_TIME / 50; i > 0; i--) {
val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
if (val == AR_RTC_STATUS_ON)
@@ -2171,6 +2168,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
return false;
}
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_set_power_awake(ah);
+
REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
return true;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbc1b7a..3e73bfe 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -401,6 +401,7 @@ enum ath9k_int {
struct ath9k_hw_cal_data {
u16 channel;
u32 channelFlags;
+ u32 chanmode;
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
@@ -834,6 +835,7 @@ struct ath_hw {
int coarse_low[5];
int firpwr[5];
enum ath9k_ani_cmd ani_function;
+ u32 ani_skip_count;
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex_hw btcoex_hw;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index fad3ccd..546bae9 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -687,6 +687,7 @@ static const struct ieee80211_iface_combination if_comb = {
.n_limits = ARRAY_SIZE(if_limits),
.max_interfaces = 2048,
.num_different_channels = 1,
+ .beacon_int_infra_match = true,
};
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 7b88b9c..223b969 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -350,8 +350,18 @@ void ath_ani_calibrate(unsigned long data)
ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
/* Only calibrate if awake */
- if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
+ if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) {
+ if (++ah->ani_skip_count >= ATH_ANI_MAX_SKIP_COUNT) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_WAIT_FOR_ANI;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ }
goto set_timer;
+ }
+ ah->ani_skip_count = 0;
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags &= ~PS_WAIT_FOR_ANI;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_ps_wakeup(sc);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dd45edf..578a723 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -131,7 +131,8 @@ void ath9k_ps_restore(struct ath_softc *sc)
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
- PS_WAIT_FOR_TX_ACK))) {
+ PS_WAIT_FOR_TX_ACK |
+ PS_WAIT_FOR_ANI))) {
mode = ATH9K_PM_NETWORK_SLEEP;
if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
ath9k_btcoex_stop_gen_timer(sc);
@@ -292,6 +293,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
goto out;
}
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
+ (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ ath9k_mci_set_txpower(sc, true, false);
+
if (!ath_complete_reset(sc, true))
r = -EIO;
@@ -1449,6 +1454,9 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ ath9k_mci_update_wlan_channels(sc, false);
+
ath_dbg(common, CONFIG,
"Primary Station interface: %pM, BSSID: %pM\n",
vif->addr, common->curbssid);
@@ -1505,6 +1513,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
memset(common->curbssid, 0, ETH_ALEN);
common->curaid = 0;
ath9k_hw_write_associd(sc->sc_ah);
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ ath9k_mci_update_wlan_channels(sc, true);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index ec2d7c8..0dd2cbb 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -43,6 +43,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
struct ath_mci_profile_info *info)
{
struct ath_mci_profile_info *entry;
+ u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
(info->type == MCI_GPM_COEX_PROFILE_VOICE))
@@ -59,6 +60,12 @@ static bool ath_mci_add_profile(struct ath_common *common,
memcpy(entry, info, 10);
INC_PROF(mci, info);
list_add_tail(&entry->list, &mci->info);
+ if (info->type == MCI_GPM_COEX_PROFILE_VOICE) {
+ if (info->voice_type < sizeof(voice_priority))
+ mci->voice_priority = voice_priority[info->voice_type];
+ else
+ mci->voice_priority = 110;
+ }
return true;
}
@@ -150,7 +157,7 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
* For single PAN/FTP profile, allocate 35% for BT
* to improve WLAN throughput.
*/
- btcoex->duty_cycle = 35;
+ btcoex->duty_cycle = AR_SREV_9565(sc->sc_ah) ? 40 : 35;
btcoex->btcoex_period = 53;
ath_dbg(common, MCI,
"Single PAN/FTP bt period %d ms dutycycle %d\n",
@@ -250,6 +257,57 @@ static void ath9k_mci_work(struct work_struct *work)
ath_mci_update_scheme(sc);
}
+static void ath_mci_update_stomp_txprio(u8 cur_txprio, u8 *stomp_prio)
+{
+ if (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_NONE])
+ stomp_prio[ATH_BTCOEX_STOMP_NONE] = cur_txprio;
+
+ if (cur_txprio > stomp_prio[ATH_BTCOEX_STOMP_ALL])
+ stomp_prio[ATH_BTCOEX_STOMP_ALL] = cur_txprio;
+
+ if ((cur_txprio > ATH_MCI_HI_PRIO) &&
+ (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_LOW]))
+ stomp_prio[ATH_BTCOEX_STOMP_LOW] = cur_txprio;
+}
+
+static void ath_mci_set_concur_txprio(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ u8 stomp_txprio[] = { 0, 0, 0, 0 }; /* all, low, none, low_ftp */
+
+ if (mci->num_mgmt) {
+ stomp_txprio[ATH_BTCOEX_STOMP_ALL] = ATH_MCI_INQUIRY_PRIO;
+ if (!mci->num_pan && !mci->num_other_acl)
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] =
+ ATH_MCI_INQUIRY_PRIO;
+ } else {
+ u8 prof_prio[] = { 50, 90, 94, 52 }; /* RFCOMM, A2DP, HID, PAN */
+
+ stomp_txprio[ATH_BTCOEX_STOMP_LOW] =
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff;
+
+ if (mci->num_sco)
+ ath_mci_update_stomp_txprio(mci->voice_priority,
+ stomp_txprio);
+ if (mci->num_other_acl)
+ ath_mci_update_stomp_txprio(prof_prio[0], stomp_txprio);
+ if (mci->num_a2dp)
+ ath_mci_update_stomp_txprio(prof_prio[1], stomp_txprio);
+ if (mci->num_hid)
+ ath_mci_update_stomp_txprio(prof_prio[2], stomp_txprio);
+ if (mci->num_pan)
+ ath_mci_update_stomp_txprio(prof_prio[3], stomp_txprio);
+
+ if (stomp_txprio[ATH_BTCOEX_STOMP_NONE] == 0xff)
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0;
+
+ if (stomp_txprio[ATH_BTCOEX_STOMP_LOW] == 0xff)
+ stomp_txprio[ATH_BTCOEX_STOMP_LOW] = 0;
+ }
+ ath9k_hw_btcoex_set_concur_txprio(sc->sc_ah, stomp_txprio);
+}
+
static u8 ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info)
{
@@ -281,6 +339,7 @@ static u8 ath_mci_process_profile(struct ath_softc *sc,
} else
ath_mci_del_profile(common, mci, entry);
+ ath_mci_set_concur_txprio(sc);
return 1;
}
@@ -314,6 +373,7 @@ static u8 ath_mci_process_status(struct ath_softc *sc,
mci->num_mgmt++;
} while (++i < ATH_MCI_MAX_PROFILE);
+ ath_mci_set_concur_txprio(sc);
if (old_num_mgmt != mci->num_mgmt)
return 1;
@@ -600,3 +660,112 @@ void ath_mci_enable(struct ath_softc *sc)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
sc->sc_ah->imask |= ATH9K_INT_MCI;
}
+
+void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ struct ath9k_channel *chan = ah->curchan;
+ u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
+ int i;
+ s16 chan_start, chan_end;
+ u16 wlan_chan;
+
+ if (!chan || !IS_CHAN_2GHZ(chan))
+ return;
+
+ if (allow_all)
+ goto send_wlan_chan;
+
+ wlan_chan = chan->channel - 2402;
+
+ chan_start = wlan_chan - 10;
+ chan_end = wlan_chan + 10;
+
+ if (chan->chanmode == CHANNEL_G_HT40PLUS)
+ chan_end += 20;
+ else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+ chan_start -= 20;
+
+ /* adjust side band */
+ chan_start -= 7;
+ chan_end += 7;
+
+ if (chan_start <= 0)
+ chan_start = 0;
+ if (chan_end >= ATH_MCI_NUM_BT_CHANNELS)
+ chan_end = ATH_MCI_NUM_BT_CHANNELS - 1;
+
+ ath_dbg(ath9k_hw_common(ah), MCI,
+ "WLAN current channel %d mask BT channel %d - %d\n",
+ wlan_chan, chan_start, chan_end);
+
+ for (i = chan_start; i < chan_end; i++)
+ MCI_GPM_CLR_CHANNEL_BIT(&channelmap, i);
+
+send_wlan_chan:
+ /* update and send wlan channels info to BT */
+ for (i = 0; i < 4; i++)
+ mci->wlan_channels[i] = channelmap[i];
+ ar9003_mci_send_wlan_channels(ah);
+ ar9003_mci_state(ah, MCI_STATE_SEND_VERSION_QUERY);
+}
+
+void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+ bool old_concur_tx = mci_hw->concur_tx;
+
+ if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) {
+ mci_hw->concur_tx = false;
+ return;
+ }
+
+ if (!IS_CHAN_2GHZ(ah->curchan))
+ return;
+
+ if (setchannel) {
+ struct ath9k_hw_cal_data *caldata = &sc->caldata;
+ if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+ (ah->curchan->channel > caldata->channel) &&
+ (ah->curchan->channel <= caldata->channel + 20))
+ return;
+ if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+ (ah->curchan->channel < caldata->channel) &&
+ (ah->curchan->channel >= caldata->channel - 20))
+ return;
+ mci_hw->concur_tx = false;
+ } else
+ mci_hw->concur_tx = concur_tx;
+
+ if (old_concur_tx != mci_hw->concur_tx)
+ ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+}
+
+void ath9k_mci_update_rssi(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+
+ if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX))
+ return;
+
+ if (ah->stats.avgbrssi >= 40) {
+ if (btcoex->rssi_count < 0)
+ btcoex->rssi_count = 0;
+ if (++btcoex->rssi_count >= ATH_MCI_CONCUR_TX_SWITCH) {
+ btcoex->rssi_count = 0;
+ ath9k_mci_set_txpower(sc, false, true);
+ }
+ } else {
+ if (btcoex->rssi_count > 0)
+ btcoex->rssi_count = 0;
+ if (--btcoex->rssi_count <= -ATH_MCI_CONCUR_TX_SWITCH) {
+ btcoex->rssi_count = 0;
+ ath9k_mci_set_txpower(sc, false, false);
+ }
+ }
+}
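ath9k_mci_update_rssi() implements a +/-ATH_MCI_CONCUR_TX_SWITCH (5 sample) hysteresis around an average beacon RSSI of 40: five consecutive strong readings enable concurrent tx, five consecutive weak readings disable it, and a reading on the opposite side of the threshold first resets the counter. A stand-alone model of that state machine (illustrative only):

    /* strong = (avgbrssi >= 40); returns 1/0/-1 for enable/no change/disable */
    static int rssi_hysteresis(int *count, int strong)
    {
            if (strong) {
                    if (*count < 0)
                            *count = 0;
                    if (++(*count) >= 5) {          /* ATH_MCI_CONCUR_TX_SWITCH */
                            *count = 0;
                            return 1;
                    }
            } else {
                    if (*count > 0)
                            *count = 0;
                    if (--(*count) <= -5) {
                            *count = 0;
                            return -1;
                    }
            }
            return 0;
    }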
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index fc14eea..0695883 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -32,6 +32,27 @@
#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\
ATH_MCI_MAX_SCO_PROFILE)
+#define ATH_MCI_INQUIRY_PRIO 62
+#define ATH_MCI_HI_PRIO 60
+#define ATH_MCI_NUM_BT_CHANNELS 79
+#define ATH_MCI_CONCUR_TX_SWITCH 5
+
+#define MCI_GPM_SET_CHANNEL_BIT(_p_gpm, _bt_chan) \
+ do { \
+ if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
+ (_bt_chan / 8)) |= (1 << (_bt_chan & 7)); \
+ } \
+ } while (0)
+
+#define MCI_GPM_CLR_CHANNEL_BIT(_p_gpm, _bt_chan) \
+ do { \
+ if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
+ (_bt_chan / 8)) &= ~(1 << (_bt_chan & 7));\
+ } \
+ } while (0)
+
#define INC_PROF(_mci, _info) do { \
switch (_info->type) { \
case MCI_GPM_COEX_PROFILE_RFCOMM:\
@@ -49,6 +70,7 @@
_mci->num_pan++; \
break; \
case MCI_GPM_COEX_PROFILE_VOICE: \
+ case MCI_GPM_COEX_PROFILE_A2DPVO:\
_mci->num_sco++; \
break; \
default: \
@@ -73,6 +95,7 @@
_mci->num_pan--; \
break; \
case MCI_GPM_COEX_PROFILE_VOICE: \
+ case MCI_GPM_COEX_PROFILE_A2DPVO:\
_mci->num_sco--; \
break; \
default: \
@@ -113,6 +136,7 @@ struct ath_mci_profile {
u8 num_pan;
u8 num_other_acl;
u8 num_bdr;
+ u8 voice_priority;
};
struct ath_mci_buf {
@@ -130,13 +154,25 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci);
int ath_mci_setup(struct ath_softc *sc);
void ath_mci_cleanup(struct ath_softc *sc);
void ath_mci_intr(struct ath_softc *sc);
+void ath9k_mci_update_rssi(struct ath_softc *sc);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
void ath_mci_enable(struct ath_softc *sc);
+void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all);
+void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx);
#else
static inline void ath_mci_enable(struct ath_softc *sc)
{
}
+static inline void ath9k_mci_update_wlan_channels(struct ath_softc *sc,
+ bool allow_all)
+{
+}
+static inline void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx)
+{
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#endif /* MCI_H*/
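A worked example for the channel-map macros above, assuming an HT20 BSS on 2437 MHz (channel 6): wlan_chan = 2437 - 2402 = 35, and after the +/-10 MHz span plus the 7 MHz sideband guard, ath9k_mci_update_wlan_channels() clears BT channels 18 through 51 from the 79-entry map sent to the BT core. The bit manipulation itself reduces to the following sketch (written for a plain byte array rather than a GPM message buffer):

    #include <stdint.h>

    /* Same operation as MCI_GPM_CLR_CHANNEL_BIT, minus the GPM offset. */
    static void clr_bt_chan(uint8_t *map, unsigned int bt_chan)
    {
            if (bt_chan < 79)                       /* ATH_MCI_NUM_BT_CHANNELS */
                    map[bt_chan / 8] &= ~(1u << (bt_chan & 7));
    }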
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 83d16e7..a04028b 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1105,7 +1105,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
else
rs.is_mybeacon = false;
- sc->rx.num_pkts++;
+ if (ieee80211_is_data_present(hdr->frame_control) &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control))
+ sc->rx.num_pkts++;
+
ath_debug_stat_rx(sc, &rs);
/*
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 4e6760f..ad3c82c 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -907,10 +907,6 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
-#define AR_SREV_9462_20_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
- ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
-
#define AR_SREV_9565(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
@@ -2315,6 +2311,8 @@ enum {
#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
#define AR_BTCOEX_WL_LNA 0x1940
#define AR_BTCOEX_RFGAIN_CTRL 0x1944
+#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF
+#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0
#define AR_BTCOEX_CTRL2 0x1948
#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
@@ -2360,4 +2358,11 @@ enum {
#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
+#define AR_MCI_MISC 0x1a74
+#define AR_MCI_MISC_HW_FIX_EN 0x00000001
+#define AR_MCI_MISC_HW_FIX_EN_S 0
+#define AR_MCI_DBG_CNT_CTRL 0x1a78
+#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001
+#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index a483d51..9f85630 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -118,7 +118,7 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
(ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
- if (AR_SREV_9462_20_OR_LATER(ah)) {
+ if (AR_SREV_9462_20(ah)) {
/* AR9462 2.0 has an extra descriptor word (time based
* discard) compared to other chips */
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index e3b1b6e..24d75ab 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -343,7 +343,24 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
break;
}
} else {
- mac_addr = NULL;
+ /*
+ * Enable monitor mode
+ *
+ * rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
+ * sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
+ *
+ * When the hardware is in SNIFFER_PROMISC mode,
+ * it generates spurious ACKs for every incoming
+ * frame. This confuses every peer in the
+ * vicinity and the network throughput will suffer
+ * badly.
+ *
+ * Hence, the hardware will be put into station
+ * mode and just the rx filters are disabled.
+ */
+ cam_mode |= AR9170_MAC_CAM_STA;
+ rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
+ mac_addr = common->macaddr;
bssid = NULL;
}
rcu_read_unlock();
@@ -355,8 +372,6 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
if (ar->sniffer_enabled) {
- rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
- sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
}
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index a0b7230..6d22382 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -164,9 +164,6 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
struct carl9170_rsp *cmd = buf;
struct ieee80211_vif *vif;
- if (carl9170_check_sequence(ar, cmd->hdr.seq))
- return;
-
if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) {
if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG))
carl9170_cmd_callback(ar, len, buf);
@@ -663,6 +660,35 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
return false;
}
+static int carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *skb;
+
+ /* (driver) frame trap handler
+ *
+ * Because power-saving mode handling has to be implemented by
+ * the driver/firmware, we have to check each incoming beacon
+ * from the associated AP; if there's new data for us (either
+ * broadcast/multicast or unicast) we have to react quickly.
+ *
+ * So, if you want to add additional frame trap
+ * handlers, this would be the perfect place!
+ */
+
+ carl9170_ps_beacon(ar, buf, len);
+
+ carl9170_ba_check(ar, buf, len);
+
+ skb = carl9170_rx_copy_data(buf, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
+ ieee80211_rx(ar->hw, skb);
+ return 0;
+}
+
/*
* If the frame alignment is right (or the kernel has
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
@@ -672,14 +698,12 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
* mode, and we need to observe the proper ordering,
* this is non-trivial.
*/
-
-static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
+static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
{
struct ar9170_rx_head *head;
struct ar9170_rx_macstatus *mac;
struct ar9170_rx_phystatus *phy = NULL;
struct ieee80211_rx_status status;
- struct sk_buff *skb;
int mpdu_len;
u8 mac_status;
@@ -791,18 +815,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
if (phy)
carl9170_rx_phy_status(ar, phy, &status);
- carl9170_ps_beacon(ar, buf, mpdu_len);
-
- carl9170_ba_check(ar, buf, mpdu_len);
-
- skb = carl9170_rx_copy_data(buf, mpdu_len);
- if (!skb)
+ if (carl9170_handle_mpdu(ar, buf, mpdu_len, &status))
goto drop;
- memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
- ieee80211_rx(ar->hw, skb);
return;
-
drop:
ar->rx_dropped++;
}
@@ -820,6 +836,9 @@ static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf,
if (unlikely(i > resplen))
break;
+ if (carl9170_check_sequence(ar, cmd->hdr.seq))
+ break;
+
carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4);
}
@@ -851,7 +870,7 @@ static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len)
if (i == 12)
carl9170_rx_untie_cmds(ar, buf, len);
else
- carl9170_handle_mpdu(ar, buf, len);
+ carl9170_rx_untie_data(ar, buf, len);
}
static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 888152c..307bc0d 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -295,6 +295,13 @@ static void carl9170_usb_rx_irq_complete(struct urb *urb)
goto resubmit;
}
+ /*
+ * While the carl9170 firmware does not use this EP, the
+ * firmware loader in the EEPROM unfortunately does.
+ * Therefore we need to be ready to handle out-of-band
+ * responses and traps in case the firmware crashed and
+ * the loader took over again.
+ */
carl9170_handle_command_response(ar, urb->transfer_buffer,
urb->actual_length);
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 19befb3..39e8a59 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -20,8 +20,8 @@
#include "ath.h"
#include "reg.h"
-#define REG_READ (common->ops->read)
-#define REG_WRITE (common->ops->write)
+#define REG_READ (common->ops->read)
+#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
/**
* ath_hw_set_bssid_mask - filter out bssids we listen
@@ -119,8 +119,8 @@ void ath_hw_setbssidmask(struct ath_common *common)
{
void *ah = common->ah;
- REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL);
- REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU);
+ REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(common->bssidmask));
+ REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(common->bssidmask + 4));
}
EXPORT_SYMBOL(ath_hw_setbssidmask);
@@ -139,7 +139,7 @@ void ath_hw_cycle_counters_update(struct ath_common *common)
void *ah = common->ah;
/* freeze */
- REG_WRITE(ah, AR_MIBC_FMC, AR_MIBC);
+ REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
/* read */
cycles = REG_READ(ah, AR_CCCNT);
@@ -148,13 +148,13 @@ void ath_hw_cycle_counters_update(struct ath_common *common)
tx = REG_READ(ah, AR_TFCNT);
/* clear */
- REG_WRITE(ah, 0, AR_CCCNT);
- REG_WRITE(ah, 0, AR_RFCNT);
- REG_WRITE(ah, 0, AR_RCCNT);
- REG_WRITE(ah, 0, AR_TFCNT);
+ REG_WRITE(ah, AR_CCCNT, 0);
+ REG_WRITE(ah, AR_RFCNT, 0);
+ REG_WRITE(ah, AR_RCCNT, 0);
+ REG_WRITE(ah, AR_TFCNT, 0);
/* unfreeze */
- REG_WRITE(ah, 0, AR_MIBC);
+ REG_WRITE(ah, AR_MIBC, 0);
/* update all cycle counters here */
common->cc_ani.cycles += cycles;
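The ath/hw.c hunk above reworks the file-private REG_WRITE wrapper so call sites use the conventional (ah, register, value) order while the underlying ath_ops ->write() callback keeps its (ah, value, register) signature; REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC) therefore still expands to common->ops->write(ah, AR_MIBC_FMC, AR_MIBC). An equivalent inline, assuming the ath_ops layout from ath.h and passing 'common' explicitly instead of capturing it from the enclosing scope:

    /* Sketch of what the reworked macro does. */
    static inline void ath_reg_write(struct ath_common *common, void *ah,
                                     u32 reg, u32 val)
    {
            common->ops->write(ah, val, reg);
    }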
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c5a99c8..ddd6a4f 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4652,7 +4652,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
- bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci,
+ bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
dev->dev->bdev, true);
break;
#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 9d5170b..fe80b63 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,7 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
wl_cfg80211.o \
+ fwil.o \
dhd_cdc.o \
dhd_common.o \
dhd_linux.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 17e7ae7..8704daa 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -318,6 +318,12 @@ struct brcmf_event {
#define BRCMF_E_LINK_ASSOC_REC 3
#define BRCMF_E_LINK_BSSCFG_DIS 4
+/* Small, medium and maximum buffer sizes for dcmd
+ */
+#define BRCMF_DCMD_SMLEN 256
+#define BRCMF_DCMD_MEDLEN 1536
+#define BRCMF_DCMD_MAXLEN 8192
+
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
@@ -623,7 +629,6 @@ struct brcmf_pub {
u8 wme_dp; /* wme discard priority */
/* Dongle media info */
- bool iswl; /* Dongle-resident driver is wl */
unsigned long drv_version; /* Version of dongle-resident driver */
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
@@ -651,16 +656,10 @@ struct brcmf_pub {
int in_suspend; /* flag set to 1 when early suspend called */
int dtim_skip; /* dtim skip , default 0 means wake each dtim */
- /* Pkt filter defination */
- char *pktfilter[100];
- int pktfilter_count;
-
- u8 country_code[BRCM_CNTRY_BUF_SZ];
- char eventmask[BRCMF_EVENTING_MASK_LEN];
-
struct brcmf_if *iflist[BRCMF_MAX_IFS];
struct mutex proto_block;
+ unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
struct work_struct setmacaddr_work;
struct work_struct multicast_work;
@@ -671,6 +670,11 @@ struct brcmf_pub {
#endif
};
+struct bcmevent_name {
+ uint event;
+ const char *name;
+};
+
struct brcmf_if_event {
u8 ifidx;
u8 action;
@@ -678,47 +682,60 @@ struct brcmf_if_event {
u8 bssidx;
};
-struct bcmevent_name {
- uint event;
- const char *name;
+/* forward declaration */
+struct brcmf_cfg80211_vif;
+
+/**
+ * struct brcmf_if - interface control information.
+ *
+ * @drvr: points to device related information.
+ * @vif: points to cfg80211 specific interface information.
+ * @ndev: associated network device.
+ * @stats: interface specific network statistics.
+ * @idx: interface index in device firmware.
+ * @bssidx: index of bss associated with this interface.
+ * @mac_addr: assigned mac address.
+ */
+struct brcmf_if {
+ struct brcmf_pub *drvr;
+ struct brcmf_cfg80211_vif *vif;
+ struct net_device *ndev;
+ struct net_device_stats stats;
+ int idx;
+ s32 bssidx;
+ u8 mac_addr[ETH_ALEN];
};
+static inline s32 brcmf_ndev_bssidx(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ return ifp->bssidx;
+}
+
extern const struct bcmevent_name bcmevent_names[];
extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
char *buf, uint len);
-extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
- char *buf, uint buflen, s32 bssidx);
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
-extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
-extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
-
/* Return pointer to interface name */
extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
/* Query dongle */
extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
uint cmd, void *buf, uint len);
-
-#ifdef DEBUG
-extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
-#endif /* DEBUG */
+extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
void *pktdata, struct brcmf_event_msg *,
void **data_ptr);
+extern int brcmf_net_attach(struct brcmf_if *ifp);
+extern struct brcmf_if *brcmf_add_if(struct device *dev, int ifidx, s32 bssidx,
+ char *name, u8 *mac_addr);
extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
-extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
-extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
- int enable, int master_mode);
-
-#define BRCMF_DCMD_SMLEN 256 /* "small" cmd buffer required */
-#define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */
-#define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */
-
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 9b8ee19..265580f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -111,9 +111,6 @@ extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
extern int brcmf_bus_start(struct device *dev);
-extern int brcmf_add_if(struct device *dev, int ifidx,
- char *name, u8 *mac_addr);
-
#ifdef CONFIG_BRCMFMAC_SDIO
extern void brcmf_sdio_exit(void);
extern void brcmf_sdio_init(void);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index a5c15ca..b9d8a5a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -458,35 +458,6 @@ void brcmf_proto_detach(struct brcmf_pub *drvr)
drvr->prot = NULL;
}
-int brcmf_proto_init(struct brcmf_pub *drvr)
-{
- int ret = 0;
- char buf[128];
-
- brcmf_dbg(TRACE, "Enter\n");
-
- mutex_lock(&drvr->proto_block);
-
- /* Get the device MAC address */
- strcpy(buf, "cur_etheraddr");
- ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
- buf, sizeof(buf));
- if (ret < 0) {
- mutex_unlock(&drvr->proto_block);
- return ret;
- }
- memcpy(drvr->mac, buf, ETH_ALEN);
-
- mutex_unlock(&drvr->proto_block);
-
- ret = brcmf_c_preinit_dcmds(drvr);
-
- /* Always assumes wl for now */
- drvr->iswl = true;
-
- return ret;
-}
-
void brcmf_proto_stop(struct brcmf_pub *drvr)
{
/* Nothing to do for CDC */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 15c5db5..866b669 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -28,12 +28,17 @@
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
+#include "fwil.h"
#define BRCM_OUI "\x00\x10\x18"
#define DOT11_OUI_LEN 3
#define BCMILCP_BCM_SUBTYPE_EVENT 1
-#define PKTFILTER_BUF_SIZE 2048
+#define PKTFILTER_BUF_SIZE 128
#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
+#define BRCMF_DEFAULT_BCN_TIMEOUT 3
+#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
+#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
+#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
#define MSGTRACE_VERSION 1
@@ -88,52 +93,6 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
return len;
}
-uint
-brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
- char *buf, uint buflen, s32 bssidx)
-{
- const s8 *prefix = "bsscfg:";
- s8 *p;
- u32 prefixlen;
- u32 namelen;
- u32 iolen;
- __le32 bssidx_le;
-
- if (bssidx == 0)
- return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
-
- prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
- namelen = (u32) strlen(name) + 1; /* lengh of iovar name + null */
- iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
-
- if (buflen < 0 || iolen > (u32)buflen) {
- brcmf_dbg(ERROR, "buffer is too short\n");
- return 0;
- }
-
- p = buf;
-
- /* copy prefix, no null */
- memcpy(p, prefix, prefixlen);
- p += prefixlen;
-
- /* copy iovar name including null */
- memcpy(p, name, namelen);
- p += namelen;
-
- /* bss config index as first data */
- bssidx_le = cpu_to_le32(bssidx);
- memcpy(p, &bssidx_le, sizeof(bssidx_le));
- p += sizeof(bssidx_le);
-
- /* parameter buffer follows */
- if (datalen)
- memcpy(p, data, datalen);
-
- return iolen;
-
-}
-
bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
{
@@ -490,6 +449,7 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
/* check whether packet is a BRCM event pkt */
struct brcmf_event *pvt_data = (struct brcmf_event *) pktdata;
struct brcmf_if_event *ifevent;
+ struct brcmf_if *ifp;
char *event_data;
u32 type, status;
u16 flags;
@@ -525,12 +485,17 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
brcmf_dbg(TRACE, "if event\n");
if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
- if (ifevent->action == BRCMF_E_IF_ADD)
- brcmf_add_if(drvr->dev, ifevent->ifidx,
- event->ifname,
- pvt_data->eth.h_dest);
- else
+ if (ifevent->action == BRCMF_E_IF_ADD) {
+ ifp = brcmf_add_if(drvr->dev, ifevent->ifidx,
+ ifevent->bssidx,
+ event->ifname,
+ pvt_data->eth.h_dest);
+ if (IS_ERR(ifp))
+ return PTR_ERR(ifp);
+ brcmf_net_attach(ifp);
+ } else {
brcmf_del_if(drvr, ifevent->ifidx);
+ }
} else {
brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
ifevent->ifidx, event->ifname);
@@ -603,90 +568,57 @@ static int brcmf_c_pattern_atoh(char *src, char *dst)
return i;
}
-void
-brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable,
- int master_mode)
+static void
+brcmf_c_pktfilter_offload_enable(struct brcmf_if *ifp, char *arg, int enable,
+ int master_mode)
{
unsigned long res;
- char *argv[8];
- int i = 0;
- const char *str;
- int buf_len;
- int str_len;
+ char *argv;
char *arg_save = NULL, *arg_org = NULL;
- int rc;
- char buf[128];
+ s32 err;
struct brcmf_pkt_filter_enable_le enable_parm;
- struct brcmf_pkt_filter_enable_le *pkt_filterp;
- __le32 mmode_le;
- arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC);
+ arg_save = kstrdup(arg, GFP_ATOMIC);
if (!arg_save)
goto fail;
arg_org = arg_save;
- memcpy(arg_save, arg, strlen(arg) + 1);
- argv[i] = strsep(&arg_save, " ");
+ argv = strsep(&arg_save, " ");
- i = 0;
- if (NULL == argv[i]) {
+ if (argv == NULL) {
brcmf_dbg(ERROR, "No args provided\n");
goto fail;
}
- str = "pkt_filter_enable";
- str_len = strlen(str);
- strncpy(buf, str, str_len);
- buf[str_len] = '\0';
- buf_len = str_len + 1;
-
- pkt_filterp = (struct brcmf_pkt_filter_enable_le *) (buf + str_len + 1);
-
/* Parse packet filter id. */
enable_parm.id = 0;
- if (!kstrtoul(argv[i], 0, &res))
+ if (!kstrtoul(argv, 0, &res))
enable_parm.id = cpu_to_le32((u32)res);
- /* Parse enable/disable value. */
+ /* Enable/disable the specified filter. */
enable_parm.enable = cpu_to_le32(enable);
- buf_len += sizeof(enable_parm);
- memcpy((char *)pkt_filterp, &enable_parm, sizeof(enable_parm));
+ err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_enable", &enable_parm,
+ sizeof(enable_parm));
+ if (err)
+ brcmf_dbg(ERROR, "Set pkt_filter_enable error (%d)\n", err);
- /* Enable/disable the specified filter. */
- rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
- rc = rc >= 0 ? 0 : rc;
- if (rc)
- brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
- arg, rc);
- else
- brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
-
- /* Contorl the master mode */
- mmode_le = cpu_to_le32(master_mode);
- brcmf_c_mkiovar("pkt_filter_mode", (char *)&mmode_le, 4, buf,
- sizeof(buf));
- rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf,
- sizeof(buf));
- rc = rc >= 0 ? 0 : rc;
- if (rc)
- brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
- arg, rc);
+ /* Control the master mode */
+ err = brcmf_fil_iovar_int_set(ifp, "pkt_filter_mode", master_mode);
+ if (err)
+ brcmf_dbg(ERROR, "Set pkt_filter_mode error (%d)\n", err);
fail:
kfree(arg_org);
}
-void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
+static void brcmf_c_pktfilter_offload_set(struct brcmf_if *ifp, char *arg)
{
- const char *str;
- struct brcmf_pkt_filter_le pkt_filter;
- struct brcmf_pkt_filter_le *pkt_filterp;
+ struct brcmf_pkt_filter_le *pkt_filter;
unsigned long res;
int buf_len;
- int str_len;
- int rc;
+ s32 err;
u32 mask_size;
u32 pattern_size;
char *argv[8], *buf = NULL;
@@ -704,104 +636,64 @@ void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
goto fail;
argv[i] = strsep(&arg_save, " ");
- while (argv[i++])
+ while (argv[i]) {
+ i++;
+ if (i >= 8) {
+ brcmf_dbg(ERROR, "Too many parameters\n");
+ goto fail;
+ }
argv[i] = strsep(&arg_save, " ");
+ }
- i = 0;
- if (NULL == argv[i]) {
- brcmf_dbg(ERROR, "No args provided\n");
+ if (i != 6) {
+ brcmf_dbg(ERROR, "Not enough args provided %d\n", i);
goto fail;
}
- str = "pkt_filter_add";
- strcpy(buf, str);
- str_len = strlen(str);
- buf_len = str_len + 1;
-
- pkt_filterp = (struct brcmf_pkt_filter_le *) (buf + str_len + 1);
+ pkt_filter = (struct brcmf_pkt_filter_le *)buf;
/* Parse packet filter id. */
- pkt_filter.id = 0;
- if (!kstrtoul(argv[i], 0, &res))
- pkt_filter.id = cpu_to_le32((u32)res);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Polarity not provided\n");
- goto fail;
- }
+ pkt_filter->id = 0;
+ if (!kstrtoul(argv[0], 0, &res))
+ pkt_filter->id = cpu_to_le32((u32)res);
/* Parse filter polarity. */
- pkt_filter.negate_match = 0;
- if (!kstrtoul(argv[i], 0, &res))
- pkt_filter.negate_match = cpu_to_le32((u32)res);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Filter type not provided\n");
- goto fail;
- }
+ pkt_filter->negate_match = 0;
+ if (!kstrtoul(argv[1], 0, &res))
+ pkt_filter->negate_match = cpu_to_le32((u32)res);
/* Parse filter type. */
- pkt_filter.type = 0;
- if (!kstrtoul(argv[i], 0, &res))
- pkt_filter.type = cpu_to_le32((u32)res);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Offset not provided\n");
- goto fail;
- }
+ pkt_filter->type = 0;
+ if (!kstrtoul(argv[2], 0, &res))
+ pkt_filter->type = cpu_to_le32((u32)res);
/* Parse pattern filter offset. */
- pkt_filter.u.pattern.offset = 0;
- if (!kstrtoul(argv[i], 0, &res))
- pkt_filter.u.pattern.offset = cpu_to_le32((u32)res);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Bitmask not provided\n");
- goto fail;
- }
+ pkt_filter->u.pattern.offset = 0;
+ if (!kstrtoul(argv[3], 0, &res))
+ pkt_filter->u.pattern.offset = cpu_to_le32((u32)res);
/* Parse pattern filter mask. */
- mask_size =
- brcmf_c_pattern_atoh
- (argv[i], (char *)pkt_filterp->u.pattern.mask_and_pattern);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Pattern not provided\n");
- goto fail;
- }
+ mask_size = brcmf_c_pattern_atoh(argv[4],
+ (char *)pkt_filter->u.pattern.mask_and_pattern);
/* Parse pattern filter pattern. */
- pattern_size =
- brcmf_c_pattern_atoh(argv[i],
- (char *)&pkt_filterp->u.pattern.
- mask_and_pattern[mask_size]);
+ pattern_size = brcmf_c_pattern_atoh(argv[5],
+ (char *)&pkt_filter->u.pattern.mask_and_pattern[mask_size]);
if (mask_size != pattern_size) {
brcmf_dbg(ERROR, "Mask and pattern not the same size\n");
goto fail;
}
- pkt_filter.u.pattern.size_bytes = cpu_to_le32(mask_size);
- buf_len += BRCMF_PKT_FILTER_FIXED_LEN;
- buf_len += (BRCMF_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
-
- /* Keep-alive attributes are set in local
- * variable (keep_alive_pkt), and
- ** then memcpy'ed into buffer (keep_alive_pktp) since there is no
- ** guarantee that the buffer is properly aligned.
- */
- memcpy((char *)pkt_filterp,
- &pkt_filter,
- BRCMF_PKT_FILTER_FIXED_LEN + BRCMF_PKT_FILTER_PATTERN_FIXED_LEN);
-
- rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
- rc = rc >= 0 ? 0 : rc;
+ pkt_filter->u.pattern.size_bytes = cpu_to_le32(mask_size);
+ buf_len = sizeof(*pkt_filter);
+ buf_len -= sizeof(pkt_filter->u.pattern.mask_and_pattern);
+ buf_len += mask_size + pattern_size;
- if (rc)
- brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
- arg, rc);
- else
- brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
+ err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_add", pkt_filter,
+ buf_len);
+ if (err)
+ brcmf_dbg(ERROR, "Set pkt_filter_add error (%d)\n", err);
fail:
kfree(arg_org);
@@ -809,130 +701,125 @@ fail:
kfree(buf);
}
-static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
- char iovbuf[32];
- int retcode;
- __le32 arp_mode_le;
-
- arp_mode_le = cpu_to_le32(arp_mode);
- brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf,
- sizeof(iovbuf));
- retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
- retcode = retcode >= 0 ? 0 : retcode;
- if (retcode)
- brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, retcode = %d\n",
- arp_mode, retcode);
- else
- brcmf_dbg(TRACE, "successfully set ARP offload mode to 0x%x\n",
- arp_mode);
-}
-
-static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
-{
- char iovbuf[32];
- int retcode;
- __le32 arp_enable_le;
-
- arp_enable_le = cpu_to_le32(arp_enable);
-
- brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4,
- iovbuf, sizeof(iovbuf));
- retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
- retcode = retcode >= 0 ? 0 : retcode;
- if (retcode)
- brcmf_dbg(TRACE, "failed to enable ARP offload to %d, retcode = %d\n",
- arp_enable, retcode);
- else
- brcmf_dbg(TRACE, "successfully enabled ARP offload to %d\n",
- arp_enable);
-}
-
-int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
-{
- char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
- "event_msgs" + '\0' + bitvec */
- char buf[128], *ptr;
- __le32 roaming_le = cpu_to_le32(1);
- __le32 bcn_timeout_le = cpu_to_le32(3);
- __le32 scan_assoc_time_le = cpu_to_le32(40);
- __le32 scan_unassoc_time_le = cpu_to_le32(40);
- int i;
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ u8 buf[BRCMF_DCMD_SMLEN];
+ char *ptr;
+ s32 err;
struct brcmf_bus_dcmd *cmdlst;
struct list_head *cur, *q;
- mutex_lock(&drvr->proto_block);
-
- /* Set Country code */
- if (drvr->country_code[0] != 0) {
- if (brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_COUNTRY,
- drvr->country_code,
- sizeof(drvr->country_code)) < 0)
- brcmf_dbg(ERROR, "country code setting failed\n");
+ /* retrieve mac address */
+ err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
+ sizeof(ifp->mac_addr));
+ if (err < 0) {
+ brcmf_dbg(ERROR, "Retreiving cur_etheraddr failed, %d\n",
+ err);
+ goto done;
}
+ memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
- ptr = buf;
- brcmf_c_mkiovar("ver", NULL, 0, buf, sizeof(buf));
- brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf));
+ strcpy(buf, "ver");
+ err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
+ if (err < 0) {
+ brcmf_dbg(ERROR, "Retreiving version information failed, %d\n",
+ err);
+ goto done;
+ }
+ ptr = (char *)buf;
strsep(&ptr, "\n");
/* Print fw version info */
brcmf_dbg(ERROR, "Firmware version = %s\n", buf);
- /* Setup timeout if Beacons are lost and roam is off to report
- link down */
- brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf,
- sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
+ /*
+ * Setup timeout if Beacons are lost and roam is off to report
+ * link down
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
+ BRCMF_DEFAULT_BCN_TIMEOUT);
+ if (err) {
+ brcmf_dbg(ERROR, "bcn_timeout error (%d)\n", err);
+ goto done;
+ }
/* Enable/Disable build-in roaming to allowed ext supplicant to take
- of romaing */
- brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- /* Setup event_msgs */
- brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
- (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
- (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le));
-
- /* Set and enable ARP offload feature */
- brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE);
- brcmf_c_arp_offload_enable(drvr, true);
-
- /* Set up pkt filter */
- for (i = 0; i < drvr->pktfilter_count; i++) {
- brcmf_c_pktfilter_offload_set(drvr, drvr->pktfilter[i]);
- brcmf_c_pktfilter_offload_enable(drvr, drvr->pktfilter[i],
- 0, true);
+ * care of roaming
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+ if (err) {
+ brcmf_dbg(ERROR, "roam_off error (%d)\n", err);
+ goto done;
+ }
+
+ /* Setup event_msgs, enable E_IF */
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ if (err) {
+ brcmf_dbg(ERROR, "Get event_msgs error (%d)\n", err);
+ goto done;
+ }
+ setbit(eventmask, BRCMF_E_IF);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ if (err) {
+ brcmf_dbg(ERROR, "Set event_msgs error (%d)\n", err);
+ goto done;
+ }
+
+ /* Setup default scan channel time */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
+ if (err) {
+ brcmf_dbg(ERROR, "BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
+ err);
+ goto done;
}
+ /* Setup default scan unassoc time */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
+ if (err) {
+ brcmf_dbg(ERROR, "BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
+ err);
+ goto done;
+ }
+
+ /* Try to set and enable ARP offload feature, this may fail */
+ err = brcmf_fil_iovar_int_set(ifp, "arp_ol", BRCMF_ARPOL_MODE);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
+ BRCMF_ARPOL_MODE, err);
+ err = 0;
+ } else {
+ err = brcmf_fil_iovar_int_set(ifp, "arpoe", 1);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to enable ARP offload err = %d\n",
+ err);
+ err = 0;
+ } else
+ brcmf_dbg(TRACE, "successfully enabled ARP offload to 0x%x\n",
+ BRCMF_ARPOL_MODE);
+ }
+
+ /* Setup packet filter */
+ brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
+ brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
+ 0, true);
+
/* set bus specific command if there is any */
- list_for_each_safe(cur, q, &drvr->bus_if->dcmd_list) {
+ list_for_each_safe(cur, q, &ifp->drvr->bus_if->dcmd_list) {
cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
- brcmf_c_mkiovar(cmdlst->name, cmdlst->param,
- cmdlst->param_len, iovbuf,
- sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
+ brcmf_fil_iovar_data_set(ifp, cmdlst->name,
+ cmdlst->param,
+ cmdlst->param_len);
}
list_del(cur);
kfree(cmdlst);
}
-
- mutex_unlock(&drvr->proto_block);
-
- return 0;
+done:
+ return err;
}
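
For reference, brcmf_c_pktfilter_offload_set() above expects exactly six space-separated tokens; the annotation below is a sketch of how BRCMF_DEFAULT_PACKET_FILTER maps onto struct brcmf_pkt_filter_le, using the field names the function assigns:

/*
 *   "100 0 0 0 0x01 0x00"
 *      |  | | |   |    +-- pattern: 0x00, the masked bit must be clear
 *      |  | | |   +------- mask:    0x01, test only the multicast (I/G) bit
 *      |  | | +----------- u.pattern.offset: 0, first byte of the dest MAC
 *      |  | +------------- type:    0
 *      |  +--------------- negate_match: 0, keep frames that match
 *      +------------------ id:      100
 *
 * i.e. the default filter passes only unicast frames, matching the intent of
 * the removed "Setup filter to allow only unicast" comment.
 */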
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 7f89540..862d2ac 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -16,8 +16,9 @@
#include <linux/debugfs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
+#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/module.h>
#include <defs.h>
#include <brcmu_wifi.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index fb508c2..eefa6c2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -31,6 +31,7 @@
#define BRCMF_EVENT_VAL 0x0800
#define BRCMF_BTA_VAL 0x1000
#define BRCMF_ISCAN_VAL 0x2000
+#define BRCMF_FIL_VAL 0x4000
#if defined(DEBUG)
@@ -56,6 +57,7 @@ do { \
#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
+#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
#else /* (defined DEBUG) || (defined DEBUG) */
@@ -67,6 +69,7 @@ do { \
#define BRCMF_BYTES_ON() 0
#define BRCMF_GLOM_ON() 0
#define BRCMF_EVENT_ON() 0
+#define BRCMF_FIL_ON() 0
#endif /* defined(DEBUG) */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index d7c76ce..2976523 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -52,16 +52,6 @@ MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac cards");
MODULE_LICENSE("Dual BSD/GPL");
-/* Interface control information */
-struct brcmf_if {
- struct brcmf_pub *drvr; /* back pointer to brcmf_pub */
- /* OS/stack specifics */
- struct net_device *ndev;
- struct net_device_stats stats;
- int idx; /* iface idx in dongle */
- u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
-};
-
/* Error bits */
int brcmf_msg_level = BRCMF_ERROR_VAL;
module_param(brcmf_msg_level, int, 0);
@@ -629,12 +619,9 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
brcmf_dbg(ERROR, "dongle is not up\n");
return -ENODEV;
}
-
/* finally, report dongle driver type */
- else if (drvr->iswl)
- sprintf(info.driver, "wl");
else
- sprintf(info.driver, "xx");
+ sprintf(info.driver, "wl");
sprintf(info.version, "%lu", drvr->drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -719,65 +706,6 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
return -EOPNOTSUPP;
}
-/* called only from within this driver. Sends a command to the dongle. */
-s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
-{
- struct brcmf_dcmd dcmd;
- s32 err = 0;
- int buflen = 0;
- bool is_set_key_cmd;
- struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
-
- memset(&dcmd, 0, sizeof(dcmd));
- dcmd.cmd = cmd;
- dcmd.buf = arg;
- dcmd.len = len;
-
- if (dcmd.buf != NULL)
- buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
-
- /* send to dongle (must be up, and wl) */
- if ((drvr->bus_if->state != BRCMF_BUS_DATA)) {
- brcmf_dbg(ERROR, "DONGLE_DOWN\n");
- err = -EIO;
- goto done;
- }
-
- if (!drvr->iswl) {
- err = -EIO;
- goto done;
- }
-
- /*
- * Intercept BRCMF_C_SET_KEY CMD - serialize M4 send and
- * set key CMD to prevent M4 encryption.
- */
- is_set_key_cmd = ((dcmd.cmd == BRCMF_C_SET_KEY) ||
- ((dcmd.cmd == BRCMF_C_SET_VAR) &&
- !(strncmp("wsec_key", dcmd.buf, 9))) ||
- ((dcmd.cmd == BRCMF_C_SET_VAR) &&
- !(strncmp("bsscfg:wsec_key", dcmd.buf, 15))));
- if (is_set_key_cmd)
- brcmf_netdev_wait_pend8021x(ndev);
-
- err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen);
-
-done:
- if (err > 0)
- err = 0;
-
- return err;
-}
-
-int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
-{
- brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
- dcmd->cmd, dcmd->buf, dcmd->len);
-
- return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
-}
-
static int brcmf_netdev_stop(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -851,7 +779,7 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
-static int brcmf_net_attach(struct brcmf_if *ifp)
+int brcmf_net_attach(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
@@ -885,15 +813,6 @@ static int brcmf_net_attach(struct brcmf_if *ifp)
memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
- /* attach to cfg80211 for primary interface */
- if (!ifp->idx) {
- drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
- if (drvr->config == NULL) {
- brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
- goto fail;
- }
- }
-
if (register_netdev(ndev) != 0) {
brcmf_dbg(ERROR, "couldn't register the net device\n");
goto fail;
@@ -905,11 +824,12 @@ static int brcmf_net_attach(struct brcmf_if *ifp)
fail:
ndev->netdev_ops = NULL;
+ free_netdev(ndev);
return -EBADE;
}
-int
-brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
+struct brcmf_if *brcmf_add_if(struct device *dev, int ifidx, s32 bssidx,
+ char *name, u8 *mac_addr)
{
struct brcmf_if *ifp;
struct net_device *ndev;
@@ -936,7 +856,7 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
if (!ndev) {
brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
ifp = netdev_priv(ndev);
@@ -944,20 +864,14 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
ifp->drvr = drvr;
drvr->iflist[ifidx] = ifp;
ifp->idx = ifidx;
+ ifp->bssidx = bssidx;
if (mac_addr != NULL)
memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
- if (brcmf_net_attach(ifp)) {
- brcmf_dbg(ERROR, "brcmf_net_attach failed");
- free_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
- return -EOPNOTSUPP;
- }
-
brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
current->pid, ifp->ndev->name);
- return 0;
+ return ifp;
}
void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
@@ -1036,10 +950,9 @@ fail:
int brcmf_bus_start(struct device *dev)
{
int ret = -1;
- /* Room for "event_msgs" + '\0' + bitvec */
- char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_if *ifp;
brcmf_dbg(TRACE, "\n");
@@ -1050,49 +963,30 @@ int brcmf_bus_start(struct device *dev)
return ret;
}
- brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, iovbuf,
- sizeof(iovbuf));
- memcpy(drvr->eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
-
- setbit(drvr->eventmask, BRCMF_E_SET_SSID);
- setbit(drvr->eventmask, BRCMF_E_PRUNE);
- setbit(drvr->eventmask, BRCMF_E_AUTH);
- setbit(drvr->eventmask, BRCMF_E_REASSOC);
- setbit(drvr->eventmask, BRCMF_E_REASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_DEAUTH_IND);
- setbit(drvr->eventmask, BRCMF_E_DISASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_DISASSOC);
- setbit(drvr->eventmask, BRCMF_E_JOIN);
- setbit(drvr->eventmask, BRCMF_E_ASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_PSK_SUP);
- setbit(drvr->eventmask, BRCMF_E_LINK);
- setbit(drvr->eventmask, BRCMF_E_NDIS_LINK);
- setbit(drvr->eventmask, BRCMF_E_MIC_ERROR);
- setbit(drvr->eventmask, BRCMF_E_PMKID_CACHE);
- setbit(drvr->eventmask, BRCMF_E_TXFAIL);
- setbit(drvr->eventmask, BRCMF_E_JOIN_START);
- setbit(drvr->eventmask, BRCMF_E_SCAN_COMPLETE);
-
-/* enable dongle roaming event */
-
- drvr->pktfilter_count = 1;
- /* Setup filter to allow only unicast */
- drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
-
- /* Bus is ready, do any protocol initialization */
- ret = brcmf_proto_init(drvr);
+ /* add primary networking interface */
+ ifp = brcmf_add_if(dev, 0, 0, "wlan%d", NULL);
+ if (IS_ERR(ifp))
+ return PTR_ERR(ifp);
+
+ /* signal bus ready */
+ bus_if->state = BRCMF_BUS_DATA;
+
+ /* Bus is ready, do any initialization */
+ ret = brcmf_c_preinit_dcmds(ifp);
if (ret < 0)
return ret;
- /* add primary networking interface */
- ret = brcmf_add_if(dev, 0, "wlan%d", drvr->mac);
- if (ret < 0)
+ drvr->config = brcmf_cfg80211_attach(drvr);
+ if (drvr->config == NULL)
+ return -ENOMEM;
+
+ ret = brcmf_net_attach(ifp);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "brcmf_net_attach failed");
+ drvr->iflist[0] = NULL;
return ret;
+ }
- /* signal bus ready */
- bus_if->state = BRCMF_BUS_DATA;
return 0;
}
@@ -1163,42 +1057,6 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
return pend;
}
-#ifdef DEBUG
-int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size)
-{
- int ret = 0;
- struct file *fp;
- mm_segment_t old_fs;
- loff_t pos = 0;
-
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- /* open file to write */
- fp = filp_open("/tmp/mem_dump", O_WRONLY | O_CREAT, 0640);
- if (!fp) {
- brcmf_dbg(ERROR, "open file error\n");
- ret = -1;
- goto exit;
- }
-
- /* Write buf to file */
- fp->f_op->write(fp, (char __user *)buf, size, &pos);
-
-exit:
- /* free buf before return */
- kfree(buf);
- /* close file before return */
- if (fp)
- filp_close(fp, NULL);
- /* restore previous address limit */
- set_fs(old_fs);
-
- return ret;
-}
-#endif /* DEBUG */
-
static void brcmf_driver_init(struct work_struct *work)
{
brcmf_debugfs_init();
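
Condensed, the reworked bring-up order in brcmf_bus_start() reads as below; my_bus_start_outline is a hypothetical wrapper shown only to illustrate the sequence, with error handling trimmed:

/* Hypothetical outline of the new bring-up order, error paths trimmed */
static int my_bus_start_outline(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;

	ifp = brcmf_add_if(dev, 0, 0, "wlan%d", NULL);	/* allocate primary interface */
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);
	bus_if->state = BRCMF_BUS_DATA;			/* bus ready before any firmware access */
	brcmf_c_preinit_dcmds(ifp);			/* mac address, defaults, event mask */
	drvr->config = brcmf_cfg80211_attach(drvr);	/* wiphy is attached from here now */
	return brcmf_net_attach(ifp);			/* register_netdev() happens last */
}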
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 6bc4425..7fe6779 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -27,11 +27,6 @@ extern int brcmf_proto_attach(struct brcmf_pub *drvr);
/* Unlink, frees allocated protocol memory (including brcmf_proto) */
extern void brcmf_proto_detach(struct brcmf_pub *drvr);
-/* Initialize protocol: sync w/dongle state.
- * Sets dongle media info (iswl, drv_version, mac address).
- */
-extern int brcmf_proto_init(struct brcmf_pub *drvr);
-
/* Stop protocol: sync w/dongle state. */
extern void brcmf_proto_stop(struct brcmf_pub *drvr);
@@ -45,7 +40,8 @@ extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx,
struct brcmf_dcmd *dcmd, int len);
-extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
+/* Sets dongle media info (drv_version, mac address). */
+extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
uint cmd, void *buf, uint len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 3564686..415f2be 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -614,6 +614,12 @@ static const uint max_roundup = 512;
#define ALIGNMENT 4
+enum brcmf_sdio_frmtype {
+ BRCMF_SDIO_FT_NORMAL,
+ BRCMF_SDIO_FT_SUPER,
+ BRCMF_SDIO_FT_SUB,
+};
+
static void pkt_align(struct sk_buff *p, int len, int align)
{
uint datalign;
@@ -1032,7 +1038,8 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
}
static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
- struct brcmf_sdio_read *rd)
+ struct brcmf_sdio_read *rd,
+ enum brcmf_sdio_frmtype type)
{
u16 len, checksum;
u8 rx_seq, fc, tx_seq_max;
@@ -1059,6 +1066,15 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
brcmf_dbg(ERROR, "HW header length error\n");
return false;
}
+ if (type == BRCMF_SDIO_FT_SUPER &&
+ (roundup(len, bus->blocksize) != rd->len)) {
+ brcmf_dbg(ERROR, "HW superframe header length error\n");
+ return false;
+ }
+ if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
+ brcmf_dbg(ERROR, "HW subframe header length error\n");
+ return false;
+ }
rd->len = len;
/*
@@ -1071,9 +1087,16 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
* Byte 5: Maximum Sequence number allow for Tx
* Byte 6~7: Reserved
*/
+ if (type == BRCMF_SDIO_FT_SUPER &&
+ SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
+ brcmf_dbg(ERROR, "Glom descriptor found in superframe head\n");
+ rd->len = 0;
+ return false;
+ }
rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
- if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) {
+ if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
+ type != BRCMF_SDIO_FT_SUPER) {
brcmf_dbg(ERROR, "HW header length too long\n");
bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
@@ -1081,6 +1104,17 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
rd->len = 0;
return false;
}
+ if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
+ brcmf_dbg(ERROR, "Wrong channel for superframe\n");
+ rd->len = 0;
+ return false;
+ }
+ if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
+ rd->channel != SDPCM_EVENT_CHANNEL) {
+ brcmf_dbg(ERROR, "Wrong channel for subframe\n");
+ rd->len = 0;
+ return false;
+ }
rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
brcmf_dbg(ERROR, "seq %d: bad data offset\n", rx_seq);
@@ -1095,6 +1129,9 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
bus->sdcnt.rx_badseq++;
rd->seq_num = rx_seq;
}
+ /* no need to check the rest for subframe */
+ if (type == BRCMF_SDIO_FT_SUB)
+ return true;
rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
/* only warm for NON glom packet */
@@ -1126,16 +1163,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
u16 dlen, totlen;
u8 *dptr, num = 0;
- u16 sublen, check;
+ u16 sublen;
struct sk_buff *pfirst, *pnext;
int errcode;
- u8 chan, seq, doff, sfdoff;
- u8 txmax;
+ u8 doff, sfdoff;
int ifidx = 0;
bool usechain = bus->use_rxchain;
- u16 next_len;
+
+ struct brcmf_sdio_read rd_new;
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
@@ -1279,68 +1316,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pfirst->data, min_t(int, pfirst->len, 48),
"SUPERFRAME:\n");
- /* Validate the superframe header */
- dptr = (u8 *) (pfirst->data);
- sublen = get_unaligned_le16(dptr);
- check = get_unaligned_le16(dptr + sizeof(u16));
-
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
- next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
- if ((next_len << 4) > MAX_RX_DATASZ) {
- brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
- next_len, seq);
- next_len = 0;
- }
- bus->cur_read.len = next_len << 4;
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
- txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-
- errcode = 0;
- if ((u16)~(sublen ^ check)) {
- brcmf_dbg(ERROR, "(superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
- sublen, check);
- errcode = -1;
- } else if (roundup(sublen, bus->blocksize) != dlen) {
- brcmf_dbg(ERROR, "(superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
- sublen, roundup(sublen, bus->blocksize),
- dlen);
- errcode = -1;
- } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) !=
- SDPCM_GLOM_CHANNEL) {
- brcmf_dbg(ERROR, "(superframe): bad channel %d\n",
- SDPCM_PACKET_CHANNEL(
- &dptr[SDPCM_FRAMETAG_LEN]));
- errcode = -1;
- } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
- brcmf_dbg(ERROR, "(superframe): got 2nd descriptor?\n");
- errcode = -1;
- } else if ((doff < SDPCM_HDRLEN) ||
- (doff > (pfirst->len - SDPCM_HDRLEN))) {
- brcmf_dbg(ERROR, "(superframe): Bad data offset %d: HW %d pkt %d min %d\n",
- doff, sublen, pfirst->len, SDPCM_HDRLEN);
- errcode = -1;
- }
-
- /* Check sequence number of superframe SW header */
- if (rxseq != seq) {
- brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
- seq, rxseq);
- bus->sdcnt.rx_badseq++;
- rxseq = seq;
- }
-
- /* Check window for sanity */
- if ((u8) (txmax - bus->tx_seq) > 0x40) {
- brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
- txmax, bus->tx_seq);
- txmax = bus->tx_seq + 2;
- }
- bus->tx_max = txmax;
+ rd_new.seq_num = rxseq;
+ rd_new.len = dlen;
+ errcode = -!brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
+ BRCMF_SDIO_FT_SUPER);
+ bus->cur_read.len = rd_new.len_nxtfrm << 4;
/* Remove superframe header, remember offset */
- skb_pull(pfirst, doff);
- sfdoff = doff;
+ skb_pull(pfirst, rd_new.dat_offset);
+ sfdoff = rd_new.dat_offset;
num = 0;
/* Validate all the subframe headers */
@@ -1349,34 +1333,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (errcode)
break;
- dptr = (u8 *) (pnext->data);
- dlen = (u16) (pnext->len);
- sublen = get_unaligned_le16(dptr);
- check = get_unaligned_le16(dptr + sizeof(u16));
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ rd_new.len = pnext->len;
+ rd_new.seq_num = rxseq++;
+ errcode = -!brcmf_sdio_hdparser(bus, pnext->data,
+ &rd_new,
+ BRCMF_SDIO_FT_SUB);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
- dptr, 32, "subframe:\n");
+ pnext->data, 32, "subframe:\n");
- if ((u16)~(sublen ^ check)) {
- brcmf_dbg(ERROR, "(subframe %d): HW hdr error: len/check 0x%04x/0x%04x\n",
- num, sublen, check);
- errcode = -1;
- } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
- brcmf_dbg(ERROR, "(subframe %d): length mismatch: len 0x%04x, expect 0x%04x\n",
- num, sublen, dlen);
- errcode = -1;
- } else if ((chan != SDPCM_DATA_CHANNEL) &&
- (chan != SDPCM_EVENT_CHANNEL)) {
- brcmf_dbg(ERROR, "(subframe %d): bad channel %d\n",
- num, chan);
- errcode = -1;
- } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
- brcmf_dbg(ERROR, "(subframe %d): Bad data offset %d: HW %d min %d\n",
- num, doff, sublen, SDPCM_HDRLEN);
- errcode = -1;
- }
- /* increase the subframe count */
num++;
}
@@ -1402,27 +1366,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
- brcmf_dbg(GLOM, "Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
- num, pfirst, pfirst->data,
- pfirst->len, sublen, chan, seq);
-
- /* precondition: chan == SDPCM_DATA_CHANNEL ||
- chan == SDPCM_EVENT_CHANNEL */
-
- if (rxseq != seq) {
- brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
- seq, rxseq);
- bus->sdcnt.rx_badseq++;
- rxseq = seq;
- }
- rxseq++;
-
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
- dptr, dlen, "Rx Subframe Data:\n");
+ dptr, pfirst->len,
+ "Rx Subframe Data:\n");
__skb_trim(pfirst, sublen);
skb_pull(pfirst, doff);
@@ -1642,7 +1590,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxhdr, SDPCM_HDRLEN,
"RxHdr:\n");
- if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) {
+ if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
+ BRCMF_SDIO_FT_NORMAL)) {
if (!bus->rxpending)
break;
else
@@ -1701,7 +1650,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
} else {
memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
rd_new.seq_num = rd->seq_num;
- if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) {
+ if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
+ BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
}
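
The three BRCMF_SDIO_FT_* types let one parser cover the normal, superframe and subframe paths; below is a condensed sketch of the glom-mode calling convention only. my_parse_glom_outline is a hypothetical helper with error accounting trimmed, mirroring brcmf_sdbrcm_rxglom() above:

/* Hypothetical helper showing the glom-mode calling convention only */
static int my_parse_glom_outline(struct brcmf_sdio *bus, struct sk_buff *pfirst,
				 u16 dlen, u8 rxseq)
{
	struct brcmf_sdio_read rd = { .seq_num = rxseq, .len = dlen };
	struct sk_buff *pnext;

	/* superframe header: length must round up to the glom read size */
	if (!brcmf_sdio_hdparser(bus, pfirst->data, &rd, BRCMF_SDIO_FT_SUPER))
		return -EINVAL;
	skb_pull(pfirst, rd.dat_offset);	/* strip superframe header */

	/* each subframe: data/event channel only, length bounded by the glom */
	skb_queue_walk(&bus->glom, pnext) {
		rd.len = pnext->len;
		rd.seq_num = rxseq++;
		if (!brcmf_sdio_hdparser(bus, pnext->data, &rd, BRCMF_SDIO_FT_SUB))
			return -EINVAL;
	}
	return 0;
}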
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
new file mode 100644
index 0000000..4b272c3
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* FWIL is the Firmware Interface Layer. This module contains the support
+ * functions for setting and getting variables to and from the firmware.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <defs.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+#include "fwil.h"
+
+
+static s32
+brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+
+ if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
+ brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
+ return -EIO;
+ }
+
+ if (data != NULL)
+ len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
+ if (set)
+ err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len);
+ else
+ err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data,
+ len);
+
+ if (err >= 0)
+ err = 0;
+ else
+ brcmf_dbg(ERROR, "Failed err=%d\n", err);
+
+ return err;
+}
+
+s32
+brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+{
+ s32 err;
+
+ mutex_lock(&ifp->drvr->proto_block);
+
+ brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, len, "data");
+
+ err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
+ mutex_unlock(&ifp->drvr->proto_block);
+
+ return err;
+}
+
+s32
+brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+{
+ s32 err;
+
+ mutex_lock(&ifp->drvr->proto_block);
+ err = brcmf_fil_cmd_data(ifp, cmd, data, len, false);
+
+ brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, len, "data");
+
+ mutex_unlock(&ifp->drvr->proto_block);
+
+ return err;
+}
+
+
+s32
+brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(data);
+
+ mutex_lock(&ifp->drvr->proto_block);
+ err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
+ mutex_unlock(&ifp->drvr->proto_block);
+
+ return err;
+}
+
+s32
+brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(*data);
+
+ mutex_lock(&ifp->drvr->proto_block);
+ err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
+ mutex_unlock(&ifp->drvr->proto_block);
+ *data = le32_to_cpu(data_le);
+
+ return err;
+}
+
+static u32
+brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
+{
+ u32 len;
+
+ len = strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ memcpy(buf, name, len);
+
+ /* append data onto the end of the name string */
+ if (data && datalen)
+ memcpy(&buf[len], data, datalen);
+
+ return len + datalen;
+}
+
+
+s32
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+ u32 len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+ u32 buflen;
+
+ mutex_lock(&drvr->proto_block);
+
+ brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, len, "data");
+
+ buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
+ sizeof(drvr->proto_buf));
+ if (buflen) {
+ err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
+ buflen, true);
+ } else {
+ err = -EPERM;
+ brcmf_dbg(ERROR, "Creating iovar failed\n");
+ }
+
+ mutex_unlock(&drvr->proto_block);
+ return err;
+}
+
+s32
+brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+ u32 len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+ u32 buflen;
+
+ mutex_lock(&drvr->proto_block);
+
+ buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
+ sizeof(drvr->proto_buf));
+ if (buflen) {
+ err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
+ buflen, false);
+ if (err == 0)
+ memcpy(data, drvr->proto_buf, len);
+ } else {
+ err = -EPERM;
+ brcmf_dbg(ERROR, "Creating iovar failed\n");
+ }
+
+ brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, len, "data");
+
+ mutex_unlock(&drvr->proto_block);
+ return err;
+}
+
+s32
+brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+}
+
+s32
+brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
+static u32
+brcmf_create_bsscfg(s32 bssidx, char *name, char *data, u32 datalen, char *buf,
+ u32 buflen)
+{
+ const s8 *prefix = "bsscfg:";
+ s8 *p;
+ u32 prefixlen;
+ u32 namelen;
+ u32 iolen;
+ __le32 bssidx_le;
+
+ if (bssidx == 0)
+ return brcmf_create_iovar(name, data, datalen, buf, buflen);
+
+ prefixlen = strlen(prefix);
+ namelen = strlen(name) + 1; /* length of iovar name + null */
+ iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
+
+ if (buflen < iolen) {
+ brcmf_dbg(ERROR, "buffer is too short\n");
+ return 0;
+ }
+
+ p = buf;
+
+ /* copy prefix, no null */
+ memcpy(p, prefix, prefixlen);
+ p += prefixlen;
+
+ /* copy iovar name including null */
+ memcpy(p, name, namelen);
+ p += namelen;
+
+ /* bss config index as first data */
+ bssidx_le = cpu_to_le32(bssidx);
+ memcpy(p, &bssidx_le, sizeof(bssidx_le));
+ p += sizeof(bssidx_le);
+
+ /* parameter buffer follows */
+ if (datalen)
+ memcpy(p, data, datalen);
+
+ return iolen;
+}
+
+s32
+brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
+ void *data, u32 len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+ u32 buflen;
+
+ mutex_lock(&drvr->proto_block);
+
+ brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, len, "data");
+
+ buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
+ drvr->proto_buf, sizeof(drvr->proto_buf));
+ if (buflen) {
+ err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
+ buflen, true);
+ } else {
+ err = -EPERM;
+ brcmf_dbg(ERROR, "Creating bsscfg failed\n");
+ }
+
+ mutex_unlock(&drvr->proto_block);
+ return err;
+}
+
+s32
+brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
+ void *data, u32 len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+ u32 buflen;
+
+ mutex_lock(&drvr->proto_block);
+
+ buflen = brcmf_create_bsscfg(ifp->bssidx, name, NULL, len,
+ drvr->proto_buf, sizeof(drvr->proto_buf));
+ if (buflen) {
+ err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
+ buflen, false);
+ if (err == 0)
+ memcpy(data, drvr->proto_buf, len);
+ } else {
+ err = -EPERM;
+ brcmf_dbg(ERROR, "Creating bsscfg failed\n");
+ }
+ brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, len, "data");
+
+ mutex_unlock(&drvr->proto_block);
+ return err;
+
+}
+
+s32
+brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+ sizeof(data_le));
+}
+
+s32
+brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+ sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
new file mode 100644
index 0000000..16eb820
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _fwil_h_
+#define _fwil_h_
+
+s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
+s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+
+#endif /* _fwil_h_ */
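
A minimal usage sketch of the FWIL API declared above. my_fwil_example is hypothetical; the iovar and command names are ones already used elsewhere in this series:

/* Hypothetical example; iovar/command names are taken from this series */
static int my_fwil_example(struct brcmf_if *ifp)
{
	char ver[BRCMF_DCMD_SMLEN];
	s32 err;

	err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);	/* 32-bit named firmware variable */
	if (err)
		return err;
	/* ioctl-style command taking a 32-bit value */
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME, 40);
	if (err)
		return err;
	/* buffer-based get, e.g. the firmware version string */
	return brcmf_fil_iovar_data_get(ifp, "ver", ver, sizeof(ver));
}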
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 7a6dfdc..484a6e4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -42,7 +42,6 @@
#define IOCTL_RESP_TIMEOUT 2000
-#define BRCMF_USB_SYNC_TIMEOUT 300 /* ms */
#define BRCMF_USB_DLIMAGE_SPINWAIT 100 /* in unit of ms */
#define BRCMF_USB_DLIMAGE_LIMIT 500 /* spinwait limit (ms) */
@@ -116,10 +115,6 @@ struct brcmf_usbdev_info {
u8 *image; /* buffer for combine fw and nvram */
int image_len;
- wait_queue_head_t wait;
- bool waitdone;
- int sync_urb_status;
-
struct usb_device *usbdev;
struct device *dev;
@@ -131,7 +126,6 @@ struct brcmf_usbdev_info {
int ctl_urb_status;
int ctl_completed;
wait_queue_head_t ioctl_resp_wait;
- wait_queue_head_t ctrl_wait;
ulong ctl_op;
struct urb *bulk_urb; /* used for FW download */
@@ -754,34 +748,14 @@ static void brcmf_usb_down(struct device *dev)
brcmf_usb_free_q(&devinfo->rx_postq, true);
}
-static int
-brcmf_usb_sync_wait(struct brcmf_usbdev_info *devinfo, u16 time)
-{
- int ret;
- int err = 0;
- int ms = time;
-
- ret = wait_event_interruptible_timeout(devinfo->wait,
- devinfo->waitdone == true, (ms * HZ / 1000));
-
- if ((devinfo->waitdone == false) || (devinfo->sync_urb_status)) {
- brcmf_dbg(ERROR, "timeout(%d) or urb err=%d\n",
- ret, devinfo->sync_urb_status);
- err = -EINVAL;
- }
- devinfo->waitdone = false;
- return err;
-}
-
static void
brcmf_usb_sync_complete(struct urb *urb)
{
struct brcmf_usbdev_info *devinfo =
(struct brcmf_usbdev_info *)urb->context;
- devinfo->waitdone = true;
- wake_up_interruptible(&devinfo->wait);
- devinfo->sync_urb_status = urb->status;
+ devinfo->ctl_completed = true;
+ brcmf_usb_ioctl_resp_wake(devinfo);
}
static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
@@ -813,6 +787,7 @@ static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
(void *) tmpbuf, size,
(usb_complete_t)brcmf_usb_sync_complete, devinfo);
+ devinfo->ctl_completed = false;
ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
if (ret < 0) {
brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
@@ -820,11 +795,11 @@ static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
return false;
}
- ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT);
+ ret = brcmf_usb_ioctl_resp_wait(devinfo);
memcpy(buffer, tmpbuf, buflen);
kfree(tmpbuf);
- return (ret == 0);
+ return ret;
}
static bool
@@ -918,13 +893,14 @@ brcmf_usb_dl_send_bulk(struct brcmf_usbdev_info *devinfo, void *buffer, int len)
devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET;
+ devinfo->ctl_completed = false;
ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC);
if (ret) {
brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
return ret;
}
- ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT);
- return ret;
+ ret = brcmf_usb_ioctl_resp_wait(devinfo);
+ return (ret == 0);
}
static int
@@ -1284,7 +1260,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
goto error;
}
- init_waitqueue_head(&devinfo->wait);
if (!brcmf_usb_dlneeded(devinfo))
return &devinfo->bus_pub;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 481345c..6d55424 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -35,6 +35,7 @@
#include <brcmu_wifi.h>
#include "dhd.h"
#include "wl_cfg80211.h"
+#include "fwil.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
#define BRCMF_PNO_VERSION 2
@@ -48,6 +49,8 @@
#define BRCMF_PNO_SCAN_COMPLETE 1
#define BRCMF_PNO_SCAN_INCOMPLETE 0
+#define BRCMF_IFACE_MAX_CNT 2
+
#define TLV_LEN_OFF 1 /* length offset */
#define TLV_HDR_LEN 2 /* header length */
#define TLV_BODY_OFF 2 /* body offset */
@@ -91,16 +94,13 @@
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
-static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
-
static u32 brcmf_dbg_level = WL_DBG_ERR;
-static bool check_sys_up(struct wiphy *wiphy)
+static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- if (!test_bit(WL_STATUS_READY, &cfg->status)) {
- WL_INFO("device is not ready : status (%d)\n",
- (int)cfg->status);
+ if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
+ WL_INFO("device is not ready : status (%lu)\n",
+ vif->sme_state);
return false;
}
return true;
@@ -391,55 +391,29 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
return qdbm;
}
-/* function for reading/writing a single u32 from/to the dongle */
-static int
-brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
+static u16 channel_to_chanspec(struct ieee80211_channel *ch)
{
- int err;
- __le32 par_le = cpu_to_le32(*par);
-
- err = brcmf_exec_dcmd(ndev, cmd, &par_le, sizeof(__le32));
- *par = le32_to_cpu(par_le);
-
- return err;
-}
-
-static s32
-brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
- void *param, s32 paramlen,
- void *buf, s32 buflen, s32 bssidx)
-{
- s32 err = -ENOMEM;
- u32 len;
-
- len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
- buf, buflen, bssidx);
- BUG_ON(!len);
- if (len > 0)
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
- if (err)
- WL_ERR("error (%d)\n", err);
+ u16 chanspec;
- return err;
-}
+ chanspec = ieee80211_frequency_to_channel(ch->center_freq);
+ chanspec &= WL_CHANSPEC_CHAN_MASK;
-static s32
-brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name,
- void *param, s32 paramlen,
- void *buf, s32 buflen, s32 bssidx)
-{
- s32 err = -ENOMEM;
- u32 len;
-
- len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
- buf, buflen, bssidx);
- BUG_ON(!len);
- if (len > 0)
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
- if (err)
- WL_ERR("error (%d)\n", err);
+ if (ch->band == IEEE80211_BAND_2GHZ)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
- return err;
+ if (ch->flags & IEEE80211_CHAN_NO_HT40) {
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+ } else {
+ chanspec |= WL_CHANSPEC_BW_40;
+ if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
+ else
+ chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
+ }
+ return chanspec;
}
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
@@ -457,18 +431,17 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
}
static int
-send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx,
- struct net_device *ndev, struct brcmf_wsec_key *key)
+send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
{
int err;
struct brcmf_wsec_key_le key_le;
convert_key_from_CPU(key, &key_le);
- err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
- sizeof(key_le),
- cfg->extra_buf,
- WL_EXTRA_BUF_MAX, bssidx);
+ brcmf_netdev_wait_pend8021x(ndev);
+
+ err = brcmf_fil_bsscfg_data_set(netdev_priv(ndev), "wsec_key", &key_le,
+ sizeof(key_le));
if (err)
WL_ERR("wsec_key error (%d)\n", err);
@@ -480,6 +453,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
s32 infra = 0;
s32 ap = 0;
@@ -511,7 +485,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
if (ap) {
- set_bit(WL_STATUS_AP_CREATING, &cfg->status);
+ set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
if (!cfg->ap_info)
cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
GFP_KERNEL);
@@ -521,7 +495,8 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
WL_INFO("IF Type = AP\n");
} else {
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev),
+ BRCMF_C_SET_INFRA, infra);
if (err) {
WL_ERR("WLC_SET_INFRA error (%d)\n", err);
err = -EAGAIN;
@@ -539,99 +514,13 @@ done:
return err;
}
-static s32 brcmf_dev_intvar_set(struct net_device *ndev, s8 *name, s32 val)
-{
- s8 buf[BRCMF_DCMD_SMLEN];
- u32 len;
- s32 err = 0;
- __le32 val_le;
-
- val_le = cpu_to_le32(val);
- len = brcmf_c_mkiovar(name, (char *)(&val_le), sizeof(val_le), buf,
- sizeof(buf));
- BUG_ON(!len);
-
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
- if (err)
- WL_ERR("error (%d)\n", err);
-
- return err;
-}
-
-static s32
-brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
-{
- union {
- s8 buf[BRCMF_DCMD_SMLEN];
- __le32 val;
- } var;
- u32 len;
- u32 data_null;
- s32 err = 0;
-
- len =
- brcmf_c_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
- sizeof(var.buf));
- BUG_ON(!len);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, &var, len);
- if (err)
- WL_ERR("error (%d)\n", err);
-
- *retval = le32_to_cpu(var.val);
-
- return err;
-}
-
-static s32
-brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
- s32 bssidx)
-{
- s8 buf[BRCMF_DCMD_SMLEN];
- __le32 val_le;
-
- val_le = cpu_to_le32(val);
-
- return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
- sizeof(val_le), buf, sizeof(buf),
- bssidx);
-}
-
-static s32
-brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
- s32 bssidx)
-{
- s8 buf[BRCMF_DCMD_SMLEN];
- s32 err;
- __le32 val_le;
-
- memset(buf, 0, sizeof(buf));
- err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
- sizeof(buf), bssidx);
- if (err == 0) {
- memcpy(&val_le, buf, sizeof(val_le));
- *val = le32_to_cpu(val_le);
- }
- return err;
-}
-
-
-/*
- * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
- * should return the ndev matching bssidx.
- */
-static s32
-brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
-{
- return 0;
-}
-
static void brcmf_set_mpc(struct net_device *ndev, int mpc)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- if (test_bit(WL_STATUS_READY, &cfg->status)) {
- err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
+ if (check_vif_up(ifp->vif)) {
+ err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
if (err) {
WL_ERR("fail to set mpc\n");
return;
@@ -643,7 +532,7 @@ static void brcmf_set_mpc(struct net_device *ndev, int mpc)
static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
struct brcmf_ssid *ssid)
{
- memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
+ memset(params_le->bssid, 0xFF, ETH_ALEN);
params_le->bss_type = DOT11_BSSTYPE_ANY;
params_le->scan_type = 0;
params_le->channel_num = 0;
@@ -658,30 +547,6 @@ static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
}
static s32
-brcmf_dev_iovar_setbuf(struct net_device *ndev, s8 * iovar, void *param,
- s32 paramlen, void *bufptr, s32 buflen)
-{
- s32 iolen;
-
- iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
- BUG_ON(!iolen);
-
- return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, bufptr, iolen);
-}
-
-static s32
-brcmf_dev_iovar_getbuf(struct net_device *ndev, s8 * iovar, void *param,
- s32 paramlen, void *bufptr, s32 buflen)
-{
- s32 iolen;
-
- iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
- BUG_ON(!iolen);
-
- return brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, bufptr, buflen);
-}
-
-static s32
brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
struct brcmf_ssid *ssid, u16 action)
{
@@ -703,8 +568,8 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
params->action = cpu_to_le16(action);
params->scan_duration = cpu_to_le16(0);
- err = brcmf_dev_iovar_setbuf(iscan->ndev, "iscan", params, params_size,
- iscan->dcmd_buf, BRCMF_DCMD_SMLEN);
+ err = brcmf_fil_iovar_data_set(netdev_priv(iscan->ndev), "iscan",
+ params, params_size);
if (err) {
if (err == -EBUSY)
WL_INFO("system busy : iscan canceled\n");
@@ -721,7 +586,7 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
struct net_device *ndev = cfg_to_ndev(cfg);
struct brcmf_ssid ssid;
- __le32 passive_scan;
+ u32 passive_scan;
s32 err = 0;
/* Broadcast scan by default */
@@ -729,9 +594,9 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
iscan->state = WL_ISCAN_STATE_SCANING;
- passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev),
+ BRCMF_C_SET_PASSIVE_SCAN, passive_scan);
if (err) {
WL_ERR("error (%d)\n", err);
return err;
@@ -754,27 +619,27 @@ brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request,
struct cfg80211_ssid *this_ssid)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
struct cfg80211_ssid *ssids;
struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
- __le32 passive_scan;
+ u32 passive_scan;
bool iscan_req;
bool spec_scan;
s32 err = 0;
u32 SSID_len;
- if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ WL_ERR("Scanning already: status (%lu)\n", cfg->scan_status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
- WL_ERR("Scanning being aborted : status (%lu)\n",
- cfg->status);
+ if (test_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status)) {
+ WL_ERR("Scanning being aborted: status (%lu)\n",
+ cfg->scan_status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
- WL_ERR("Connecting : status (%lu)\n",
- cfg->status);
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
+ WL_ERR("Connecting: status (%lu)\n", ifp->vif->sme_state);
return -EAGAIN;
}
@@ -792,7 +657,7 @@ brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
}
cfg->scan_request = request;
- set_bit(WL_STATUS_SCANNING, &cfg->status);
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (iscan_req) {
err = brcmf_do_iscan(cfg);
if (!err)
@@ -813,16 +678,16 @@ brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
WL_SCAN("Broadcast scan\n");
}
- passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
+ passive_scan);
if (err) {
WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
goto scan_out;
}
brcmf_set_mpc(ndev, 0);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
- sizeof(sr->ssid_le));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
+ &sr->ssid_le, sizeof(sr->ssid_le));
if (err) {
if (err == -EBUSY)
WL_INFO("system busy : scan for \"%s\" "
@@ -838,7 +703,7 @@ brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
return 0;
scan_out:
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
cfg->scan_request = NULL;
return err;
}
@@ -851,12 +716,10 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
s32 i;
s32 offset;
u16 chanspec;
- u16 channel;
- struct ieee80211_channel *req_channel;
char *ptr;
struct brcmf_ssid_le ssid_le;
- memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
+ memset(params_le->bssid, 0xFF, ETH_ALEN);
params_le->bss_type = DOT11_BSSTYPE_ANY;
params_le->scan_type = 0;
params_le->channel_num = 0;
@@ -876,30 +739,9 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels);
if (n_channels > 0) {
for (i = 0; i < n_channels; i++) {
- chanspec = 0;
- req_channel = request->channels[i];
- channel = ieee80211_frequency_to_channel(
- req_channel->center_freq);
- if (req_channel->band == IEEE80211_BAND_2GHZ)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- } else {
- chanspec |= WL_CHANSPEC_BW_40;
- if (req_channel->flags &
- IEEE80211_CHAN_NO_HT40PLUS)
- chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
- else
- chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
- }
-
- chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
+ chanspec = channel_to_chanspec(request->channels[i]);
WL_SCAN("Chan : %d, Channel spec: %x\n",
- channel, chanspec);
+ request->channels[i]->hw_value, chanspec);
params_le->channel_list[i] = cpu_to_le16(chanspec);
}
} else {
@@ -966,7 +808,7 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
/* Do a scan abort to stop the driver's scan engine */
WL_SCAN("ABORT scan in firmware\n");
memset(&params_le, 0, sizeof(params_le));
- memcpy(params_le.bssid, ether_bcast, ETH_ALEN);
+ memset(params_le.bssid, 0xFF, ETH_ALEN);
params_le.bss_type = DOT11_BSSTYPE_ANY;
params_le.scan_type = 0;
params_le.channel_num = cpu_to_le32(1);
@@ -977,8 +819,8 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
/* Scan is aborted by setting channel_list[0] to -1 */
params_le.channel_list[0] = cpu_to_le16(-1);
	/* E-Scan (or any other type) can be aborted by SCAN */
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le,
- sizeof(params_le));
+ err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
+ &params_le, sizeof(params_le));
if (err)
WL_ERR("Scan abort failed\n");
}
@@ -998,7 +840,7 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
cfg80211_scan_done(scan_request, aborted);
brcmf_set_mpc(ndev, 1);
}
- if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
WL_ERR("Scan complete while device not scanning\n");
return -EPERM;
}
@@ -1036,8 +878,8 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
params->action = cpu_to_le16(action);
params->sync_id = cpu_to_le16(0x1234);
- err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size,
- cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN);
+ err = brcmf_fil_iovar_data_set(netdev_priv(ndev), "escan",
+ params, params_size);
if (err) {
if (err == -EBUSY)
WL_INFO("system busy : escan canceled\n");
@@ -1055,16 +897,16 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
struct net_device *ndev, struct cfg80211_scan_request *request)
{
s32 err;
- __le32 passive_scan;
+ u32 passive_scan;
struct brcmf_scan_results *results;
WL_SCAN("Enter\n");
cfg->escan_info.ndev = ndev;
cfg->escan_info.wiphy = wiphy;
cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
- passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
+ passive_scan);
if (err) {
WL_ERR("error (%d)\n", err);
return err;
@@ -1086,10 +928,11 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request,
struct cfg80211_ssid *this_ssid)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
struct cfg80211_ssid *ssids;
struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
- __le32 passive_scan;
+ u32 passive_scan;
bool escan_req;
bool spec_scan;
s32 err;
@@ -1097,18 +940,17 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
WL_SCAN("START ESCAN\n");
- if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ WL_ERR("Scanning already: status (%lu)\n", cfg->scan_status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
- WL_ERR("Scanning being aborted : status (%lu)\n",
- cfg->status);
+ if (test_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status)) {
+ WL_ERR("Scanning being aborted: status (%lu)\n",
+ cfg->scan_status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
- WL_ERR("Connecting : status (%lu)\n",
- cfg->status);
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
+ WL_ERR("Connecting: status (%lu)\n", ifp->vif->sme_state);
return -EAGAIN;
}
@@ -1128,7 +970,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
}
cfg->scan_request = request;
- set_bit(WL_STATUS_SCANNING, &cfg->status);
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (escan_req) {
err = brcmf_do_escan(cfg, wiphy, ndev, request);
if (!err)
@@ -1149,16 +991,16 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
} else
WL_SCAN("Broadcast scan\n");
- passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
+ passive_scan);
if (err) {
WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
goto scan_out;
}
brcmf_set_mpc(ndev, 0);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
- sizeof(sr->ssid_le));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
+ &sr->ssid_le, sizeof(sr->ssid_le));
if (err) {
if (err == -EBUSY)
WL_INFO("BUSY: scan for \"%s\" canceled\n",
@@ -1174,7 +1016,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
return 0;
scan_out:
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (timer_pending(&cfg->escan_timeout))
del_timer_sync(&cfg->escan_timeout);
cfg->scan_request = NULL;
@@ -1182,8 +1024,7 @@ scan_out:
}
static s32
-brcmf_cfg80211_scan(struct wiphy *wiphy,
- struct cfg80211_scan_request *request)
+brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct net_device *ndev = request->wdev->netdev;
struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
@@ -1191,7 +1032,8 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(container_of(request->wdev,
+ struct brcmf_cfg80211_vif, wdev)))
return -EIO;
if (cfg->iscan_on)
@@ -1210,7 +1052,8 @@ static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold)
{
s32 err = 0;
- err = brcmf_dev_intvar_set(ndev, "rtsthresh", rts_threshold);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "rtsthresh",
+ rts_threshold);
if (err)
WL_ERR("Error (%d)\n", err);
@@ -1221,7 +1064,8 @@ static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
{
s32 err = 0;
- err = brcmf_dev_intvar_set(ndev, "fragthresh", frag_threshold);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "fragthresh",
+ frag_threshold);
if (err)
WL_ERR("Error (%d)\n", err);
@@ -1233,7 +1077,7 @@ static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
s32 err = 0;
u32 cmd = (l ? BRCM_SET_LRL : BRCM_SET_SRL);
- err = brcmf_exec_dcmd_u32(ndev, cmd, &retry);
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev), cmd, retry);
if (err) {
WL_ERR("cmd (%d) , error (%d)\n", cmd, err);
return err;
@@ -1245,10 +1089,11 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
@@ -1327,7 +1172,8 @@ static void brcmf_link_down(struct brcmf_cfg80211_info *cfg)
if (cfg->link_up) {
ndev = cfg_to_ndev(cfg);
WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
- err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
+ err = brcmf_fil_cmd_data_set(netdev_priv(ndev),
+ BRCMF_C_DISASSOC, NULL, 0);
if (err)
WL_ERR("WLC_DISASSOC failed (%d)\n", err);
cfg->link_up = false;
@@ -1340,7 +1186,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ibss_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_join_params join_params;
size_t join_params_size = 0;
s32 err = 0;
@@ -1348,7 +1195,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
s32 bcnprd;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (params->ssid)
@@ -1358,7 +1205,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
return -EOPNOTSUPP;
}
- set_bit(WL_STATUS_CONNECTING, &cfg->status);
+ set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
if (params->bssid)
WL_CONN("BSSID: %pM\n", params->bssid);
@@ -1399,7 +1246,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
if (params->privacy)
wsec |= WEP_ENABLED;
- err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+ err = brcmf_fil_iovar_int_set(ifp, "wsec", wsec);
if (err) {
WL_ERR("wsec failed (%d)\n", err);
goto done;
@@ -1411,7 +1258,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
else
bcnprd = 100;
- err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_BCNPRD, &bcnprd);
+ err = brcmf_fil_cmd_int_set(ifp, BRCM_SET_BCNPRD, bcnprd);
if (err) {
WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err);
goto done;
@@ -1434,7 +1281,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
BRCMF_ASSOC_PARAMS_FIXED_SIZE;
memcpy(profile->bssid, params->bssid, ETH_ALEN);
} else {
- memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+ memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
memset(profile->bssid, 0, ETH_ALEN);
}
@@ -1453,8 +1300,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
/* set channel for starter */
target_channel = cfg->channel;
- err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
- &target_channel);
+ err = brcmf_fil_cmd_int_set(ifp, BRCM_SET_CHANNEL,
+ target_channel);
if (err) {
WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err);
goto done;
@@ -1465,8 +1312,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
cfg->ibss_starter = false;
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
- &join_params, join_params_size);
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, join_params_size);
if (err) {
WL_ERR("WLC_SET_SSID failed (%d)\n", err);
goto done;
@@ -1474,7 +1321,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
done:
if (err)
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
WL_TRACE("Exit\n");
return err;
}
@@ -1483,10 +1330,11 @@ static s32
brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
brcmf_link_down(cfg);
@@ -1499,8 +1347,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
static s32 brcmf_set_wpa_version(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1512,7 +1359,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
else
val = WPA_AUTH_DISABLED;
WL_CONN("setting wpa_auth to 0x%0x\n", val);
- err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val);
if (err) {
WL_ERR("set wpa_auth failed (%d)\n", err);
return err;
@@ -1525,8 +1372,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
static s32 brcmf_set_auth_type(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1552,7 +1398,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
break;
}
- err = brcmf_dev_intvar_set(ndev, "auth", val);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val);
if (err) {
WL_ERR("set auth failed (%d)\n", err);
return err;
@@ -1566,8 +1412,7 @@ static s32
brcmf_set_set_cipher(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 pval = 0;
s32 gval = 0;
@@ -1617,7 +1462,7 @@ brcmf_set_set_cipher(struct net_device *ndev,
}
WL_CONN("pval (%d) gval (%d)\n", pval, gval);
- err = brcmf_dev_intvar_set(ndev, "wsec", pval | gval);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval);
if (err) {
WL_ERR("error (%d)\n", err);
return err;
@@ -1633,14 +1478,14 @@ brcmf_set_set_cipher(struct net_device *ndev,
static s32
brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
if (sme->crypto.n_akm_suites) {
- err = brcmf_dev_intvar_get(ndev, "wpa_auth", &val);
+ err = brcmf_fil_iovar_int_get(netdev_priv(ndev),
+ "wpa_auth", &val);
if (err) {
WL_ERR("could not get wpa_auth (%d)\n", err);
return err;
@@ -1674,7 +1519,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
}
WL_CONN("setting wpa_auth to %d\n", val);
- err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev),
+ "wpa_auth", val);
if (err) {
WL_ERR("could not set wpa_auth (%d)\n", err);
return err;
@@ -1690,13 +1536,11 @@ static s32
brcmf_set_sharedkey(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
struct brcmf_wsec_key key;
s32 val;
s32 err = 0;
- s32 bssidx;
WL_CONN("key len (%d)\n", sme->key_len);
@@ -1739,15 +1583,14 @@ brcmf_set_sharedkey(struct net_device *ndev,
WL_CONN("key length (%d) key index (%d) algo (%d)\n",
key.len, key.index, key.algo);
WL_CONN("key \"%s\"\n", key.data);
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+ err = send_key_to_dongle(ndev, &key);
if (err)
return err;
if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
WL_CONN("set auth_type to shared key\n");
val = WL_AUTH_SHARED_KEY; /* shared key */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
if (err)
WL_ERR("set auth failed (%d)\n", err);
}
@@ -1759,7 +1602,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
@@ -1768,7 +1612,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (!sme->ssid) {
@@ -1776,7 +1620,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
return -EOPNOTSUPP;
}
- set_bit(WL_STATUS_CONNECTING, &cfg->status);
+ set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
if (chan) {
cfg->channel =
@@ -1827,7 +1671,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
- memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+ memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
WL_CONN("ssid \"%s\", len (%d)\n",
@@ -1835,14 +1679,14 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
brcmf_ch_to_chanspec(cfg->channel,
&join_params, &join_params_size);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
- &join_params, join_params_size);
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, join_params_size);
if (err)
WL_ERR("WLC_SET_SSID failed (%d)\n", err);
done:
if (err)
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
WL_TRACE("Exit\n");
return err;
}
@@ -1852,20 +1696,21 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
u16 reason_code)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_scb_val_le scbval;
s32 err = 0;
WL_TRACE("Enter. Reason code = %d\n", reason_code);
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
- clear_bit(WL_STATUS_CONNECTED, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
scbval.val = cpu_to_le32(reason_code);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
- sizeof(struct brcmf_scb_val_le));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_DISASSOC,
+ &scbval, sizeof(scbval));
if (err)
WL_ERR("error (%d)\n", err);
@@ -1882,13 +1727,14 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(ndev);
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
s32 dbm = MBM_TO_DBM(mbm);
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
switch (type) {
@@ -1905,7 +1751,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
}
/* Make sure radio is off or on as far as software is concerned */
disable = WL_RADIO_SW_DISABLE << 16;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_RADIO, &disable);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_RADIO, disable);
if (err)
WL_ERR("WLC_SET_RADIO error (%d)\n", err);
@@ -1913,8 +1759,8 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
txpwrmw = 0xffff;
else
txpwrmw = (u16) dbm;
- err = brcmf_dev_intvar_set(ndev, "qtxpower",
- (s32) (brcmf_mw_to_qdbm(txpwrmw)));
+ err = brcmf_fil_iovar_int_set(ifp, "qtxpower",
+ (s32)brcmf_mw_to_qdbm(txpwrmw));
if (err)
WL_ERR("qtxpower error (%d)\n", err);
cfg->conf->tx_power = dbm;
@@ -1927,16 +1773,16 @@ done:
static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
s32 txpwrdbm;
u8 result;
s32 err = 0;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
- err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
+ err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &txpwrdbm);
if (err) {
WL_ERR("error (%d)\n", err);
goto done;
@@ -1954,19 +1800,17 @@ static s32
brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool unicast, bool multicast)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
u32 index;
u32 wsec;
s32 err = 0;
- s32 bssidx;
WL_TRACE("Enter\n");
WL_CONN("key index (%d)\n", key_idx);
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
WL_ERR("WLC_GET_WSEC error (%d)\n", err);
goto done;
@@ -1975,8 +1819,8 @@ brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
if (wsec & WEP_ENABLED) {
/* Just select a new current key */
index = key_idx;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_KEY_PRIMARY,
- &index);
+ err = brcmf_fil_cmd_int_set(ifp,
+ BRCMF_C_SET_KEY_PRIMARY, index);
if (err)
WL_ERR("error (%d)\n", err);
}
@@ -1989,11 +1833,8 @@ static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_wsec_key key;
- struct brcmf_wsec_key_le key_le;
s32 err = 0;
- s32 bssidx;
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
@@ -2002,11 +1843,10 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
if (!is_multicast_ether_addr(mac_addr))
memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
key.len = (u32) params->key_len;
- bssidx = brcmf_find_bssidx(cfg, ndev);
/* check for key index change */
if (key.len == 0) {
/* key delete */
- err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+ err = send_key_to_dongle(ndev, &key);
if (err)
WL_ERR("key delete error (%d)\n", err);
} else {
@@ -2061,13 +1901,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
return -EINVAL;
}
- convert_key_from_CPU(&key, &key_le);
-
- brcmf_netdev_wait_pend8021x(ndev);
- err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
- sizeof(key_le),
- cfg->extra_buf,
- WL_EXTRA_BUF_MAX, bssidx);
+ err = send_key_to_dongle(ndev, &key);
if (err)
WL_ERR("wsec_key error (%d)\n", err);
}
@@ -2080,16 +1914,16 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
struct key_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 val;
s32 wsec;
s32 err = 0;
u8 keybuf[8];
- s32 bssidx;
WL_TRACE("Enter\n");
WL_CONN("key index (%d)\n", key_idx);
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (mac_addr) {
@@ -2147,18 +1981,17 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+ err = send_key_to_dongle(ndev, &key);
if (err)
goto done;
- err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
WL_ERR("get wsec error (%d)\n", err);
goto done;
}
wsec |= val;
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
if (err) {
WL_ERR("set wsec error (%d)\n", err);
goto done;
@@ -2173,13 +2006,12 @@ static s32
brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool pairwise, const u8 *mac_addr)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 err = 0;
- s32 bssidx;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
memset(&key, 0, sizeof(key));
@@ -2191,8 +2023,7 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
WL_CONN("key index (%d)\n", key_idx);
/* Set the new key/index */
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+ err = send_key_to_dongle(ndev, &key);
if (err) {
if (err == -EINVAL) {
if (key.index >= DOT11_MAX_DEFAULT_KEYS)
@@ -2213,22 +2044,20 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
void (*callback) (void *cookie, struct key_params * params))
{
struct key_params params;
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_cfg80211_security *sec;
s32 wsec;
s32 err = 0;
- s32 bssidx;
WL_TRACE("Enter\n");
WL_CONN("key index (%d)\n", key_idx);
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
memset(&params, 0, sizeof(params));
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
WL_ERR("WLC_GET_WSEC error (%d)\n", err);
/* Ignore this error, may happen during DISASSOC */
@@ -2280,33 +2109,33 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
u8 *mac, struct station_info *sinfo)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_scb_val_le scb_val;
int rssi;
s32 rate;
s32 err = 0;
u8 *bssid = profile->bssid;
- struct brcmf_sta_info_le *sta_info_le;
+ struct brcmf_sta_info_le sta_info_le;
WL_TRACE("Enter, MAC %pM\n", mac);
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (cfg->conf->mode == WL_MODE_AP) {
- err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN,
- cfg->dcmd_buf,
- WL_DCMD_LEN_MAX);
+ memcpy(&sta_info_le, mac, ETH_ALEN);
+ err = brcmf_fil_iovar_data_get(ifp, "sta_info",
+ &sta_info_le,
+ sizeof(sta_info_le));
if (err < 0) {
WL_ERR("GET STA INFO failed, %d\n", err);
goto done;
}
- sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
-
sinfo->filled = STATION_INFO_INACTIVE_TIME;
- sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000;
- if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) {
+ sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
+ if (le32_to_cpu(sta_info_le.flags) & BRCMF_STA_ASSOC) {
sinfo->filled |= STATION_INFO_CONNECTED_TIME;
- sinfo->connected_time = le32_to_cpu(sta_info_le->in);
+ sinfo->connected_time = le32_to_cpu(sta_info_le.in);
}
WL_TRACE("STA idle time : %d ms, connected time :%d sec\n",
sinfo->inactive_time, sinfo->connected_time);
@@ -2318,7 +2147,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
/* Report the current tx rate */
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
if (err) {
WL_ERR("Could not get rate (%d)\n", err);
goto done;
@@ -2328,10 +2157,11 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
WL_CONN("Rate %d Mbps\n", rate / 2);
}
- if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) {
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
memset(&scb_val, 0, sizeof(scb_val));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
- sizeof(scb_val));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+ &scb_val, sizeof(scb_val));
if (err) {
WL_ERR("Could not get rssi (%d)\n", err);
goto done;
@@ -2356,6 +2186,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
s32 pm;
s32 err = 0;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
WL_TRACE("Enter\n");
@@ -2367,7 +2198,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
* FW later while initializing the dongle
*/
cfg->pwr_save = enabled;
- if (!test_bit(WL_STATUS_READY, &cfg->status)) {
+ if (!check_vif_up(ifp->vif)) {
WL_INFO("Device is not ready, storing the value in cfg_info struct\n");
goto done;
@@ -2376,7 +2207,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
pm = enabled ? PM_FAST : PM_OFF;
WL_INFO("power save %s\n", (pm ? "enabled" : "disabled"));
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &pm);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm);
if (err) {
if (err == -ENODEV)
WL_ERR("net_device is not ready yet\n");
@@ -2393,6 +2224,7 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
const u8 *addr,
const struct cfg80211_bitrate_mask *mask)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcm_rateset_le rateset_le;
s32 rate;
s32 val;
@@ -2402,13 +2234,13 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
/* addr param is always NULL. ignore it */
/* Get current rateset */
- err = brcmf_exec_dcmd(ndev, BRCM_GET_CURR_RATESET, &rateset_le,
- sizeof(rateset_le));
+ err = brcmf_fil_cmd_data_get(ifp, BRCM_GET_CURR_RATESET,
+ &rateset_le, sizeof(rateset_le));
if (err) {
WL_ERR("could not get current rateset (%d)\n", err);
goto done;
@@ -2435,8 +2267,8 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
* Set rate override,
	 * Since this is a/b/g-blind, both a/bg_rate are enforced.
*/
- err_bg = brcmf_dev_intvar_set(ndev, "bg_rate", rate);
- err_a = brcmf_dev_intvar_set(ndev, "a_rate", rate);
+ err_bg = brcmf_fil_iovar_int_set(ifp, "bg_rate", rate);
+ err_a = brcmf_fil_iovar_int_set(ifp, "a_rate", rate);
if (err_bg && err_a) {
WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
err = err_bg | err_a;
@@ -2565,7 +2397,8 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+ err = brcmf_fil_cmd_data_get(netdev_priv(ndev), BRCMF_C_GET_BSS_INFO,
+ buf, WL_BSS_INFO_MAX);
if (err) {
WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err);
goto CleanUp;
@@ -2674,12 +2507,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
return false;
}
-struct brcmf_vs_tlv *
+static struct brcmf_vs_tlv *
brcmf_find_wpaie(u8 *parse, u32 len)
{
struct brcmf_tlv *ie;
- while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) {
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
return (struct brcmf_vs_tlv *)ie;
@@ -2689,7 +2522,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
@@ -2706,8 +2541,8 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
ssid = &profile->ssid;
*(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO,
- cfg->extra_buf, WL_EXTRA_BUF_MAX);
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+ cfg->extra_buf, WL_EXTRA_BUF_MAX);
if (err) {
WL_ERR("Could not get bss info %d\n", err);
goto update_bss_info_out;
@@ -2732,8 +2567,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
	* so we specifically query the dongle for dtim information.
*/
u32 var;
- err = brcmf_dev_intvar_get(cfg_to_ndev(cfg),
- "dtim_assoc", &var);
+ err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
if (err) {
WL_ERR("wl dtim_assoc failed (%d)\n", err);
goto update_bss_info_out;
@@ -2741,9 +2575,6 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
dtim_period = (u8)var;
}
- profile->beacon_interval = beacon_interval;
- profile->dtim_period = dtim_period;
-
update_bss_info_out:
WL_TRACE("Exit");
return err;
@@ -2755,7 +2586,7 @@ static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
struct escan_info *escan = &cfg->escan_info;
struct brcmf_ssid ssid;
- set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
+ set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
if (cfg->iscan_on) {
iscan->state = WL_ISCAN_STATE_IDLE;
@@ -2781,8 +2612,8 @@ static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
escan->escan_state = WL_ESCAN_STATE_IDLE;
brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
}
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
- clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
+ clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+ clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
}
static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
@@ -2791,7 +2622,7 @@ static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
struct net_device *ndev = cfg_to_ndev(cfg);
- if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
WL_ERR("Scan complete while device not scanning\n");
return;
}
@@ -2820,7 +2651,6 @@ static s32
brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
struct brcmf_scan_results **bss_list)
{
- struct brcmf_iscan_results list;
struct brcmf_scan_results *results;
struct brcmf_scan_results_le *results_le;
struct brcmf_iscan_results *list_buf;
@@ -2830,15 +2660,13 @@ brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
list_buf = (struct brcmf_iscan_results *)iscan->scan_buf;
results = &list_buf->results;
results_le = &list_buf->results_le;
- results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE;
- results->version = 0;
- results->count = 0;
+ results_le->buflen = cpu_to_le32(sizeof(iscan->scan_buf));
+ results_le->version = 0;
+ results_le->count = 0;
- memset(&list, 0, sizeof(list));
- list.results_le.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX);
- err = brcmf_dev_iovar_getbuf(iscan->ndev, "iscanresults", &list,
- BRCMF_ISCAN_RESULTS_FIXED_SIZE,
- iscan->scan_buf, WL_ISCAN_BUF_MAX);
+ err = brcmf_fil_iovar_data_get(netdev_priv(iscan->ndev), "iscanresults",
+ iscan->scan_buf,
+ sizeof(iscan->scan_buf));
if (err) {
WL_ERR("error (%d)\n", err);
return err;
@@ -3052,10 +2880,10 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
status = be32_to_cpu(e->status);
if (!ndev || !cfg->escan_on ||
- !test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n",
ndev, cfg->escan_on,
- !test_bit(WL_STATUS_SCANNING, &cfg->status));
+ !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status));
return -EPERM;
}
@@ -3159,16 +2987,17 @@ static __always_inline void brcmf_delay(u32 ms)
static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
/*
- * Check for WL_STATUS_READY before any function call which
+ * Check for BRCMF_VIF_STATUS_READY before any function call which
	 * could result in bus access. Don't block the resume for
* any driver error conditions
*/
WL_TRACE("Enter\n");
- if (test_bit(WL_STATUS_READY, &cfg->status))
- brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
+ if (check_vif_up(ifp->vif))
+ brcmf_invoke_iscan(cfg);
WL_TRACE("Exit\n");
return 0;
@@ -3179,85 +3008,53 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_cfg80211_vif *vif;
WL_TRACE("Enter\n");
/*
- * Check for WL_STATUS_READY before any function call which
- * could result is bus access. Don't block the suspend for
- * any driver error conditions
- */
-
- /*
- * While going to suspend if associated with AP disassociate
- * from AP to save power while system is in suspended state
+ * if the primary net_device is not READY there is nothing
+ * we can do but pray resume goes smoothly.
*/
- if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
- test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
- test_bit(WL_STATUS_READY, &cfg->status)) {
- WL_INFO("Disassociating from AP"
- " while entering suspend state\n");
- brcmf_link_down(cfg);
+ vif = ((struct brcmf_if *)netdev_priv(ndev))->vif;
+ if (!check_vif_up(vif))
+ goto exit;
+ list_for_each_entry(vif, &cfg->vif_list, list) {
+ if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state))
+ continue;
/*
- * Make sure WPA_Supplicant receives all the event
- * generated due to DISASSOC call to the fw to keep
- * the state fw and WPA_Supplicant state consistent
+		 * While going to suspend, if associated with an AP, disassociate
+		 * from the AP to save power while the system is suspended
*/
- brcmf_delay(500);
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state) ||
+ test_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state)) {
+ WL_INFO("Disassociating from AP before suspend\n");
+ brcmf_link_down(cfg);
+
+			/* Make sure WPA_Supplicant receives all the events
+			 * generated by the DISASSOC call to the fw to keep
+			 * the fw and WPA_Supplicant state consistent
+ */
+ brcmf_delay(500);
+ }
}
- if (test_bit(WL_STATUS_READY, &cfg->status))
+ /* end any scanning */
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
brcmf_abort_scanning(cfg);
- else
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
/* Turn off watchdog timer */
- if (test_bit(WL_STATUS_READY, &cfg->status))
- brcmf_set_mpc(ndev, 1);
+ brcmf_set_mpc(ndev, 1);
+exit:
WL_TRACE("Exit\n");
-
+ /* clear any scanning activity */
+ cfg->scan_status = 0;
return 0;
}
static __used s32
-brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
-{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- u32 buflen;
-
- buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
- WL_DCMD_LEN_MAX);
- BUG_ON(!buflen);
-
- return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
- buflen);
-}
-
-static s32
-brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
- s32 buf_len)
-{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- u32 len;
- s32 err = 0;
-
- len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
- WL_DCMD_LEN_MAX);
- BUG_ON(!len);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
- WL_DCMD_LEN_MAX);
- if (err) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
- memcpy(buf, cfg->dcmd_buf, buf_len);
-
- return err;
-}
-
-static __used s32
brcmf_update_pmklist(struct net_device *ndev,
struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
{
@@ -3275,8 +3072,8 @@ brcmf_update_pmklist(struct net_device *ndev,
}
if (!err)
- brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list,
- sizeof(*pmk_list));
+ brcmf_fil_iovar_data_set(netdev_priv(ndev), "pmkid_info",
+ (char *)pmk_list, sizeof(*pmk_list));
return err;
}
@@ -3286,13 +3083,14 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_pmksa *pmksa)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
s32 err = 0;
int i;
int pmkid_len;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
pmkid_len = le32_to_cpu(pmkids->npmkid);
@@ -3325,12 +3123,13 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_pmksa *pmksa)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct pmkid_list pmkid;
s32 err = 0;
int i, pmkid_len;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
@@ -3375,10 +3174,11 @@ static s32
brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
@@ -3478,15 +3278,15 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
if (request->n_ssids)
request->ssids = &ssid[0];
- if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
/* Abort any on-going scan */
brcmf_abort_scanning(cfg);
}
- set_bit(WL_STATUS_SCANNING, &cfg->status);
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
err = brcmf_do_escan(cfg, wiphy, ndev, request);
if (err) {
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
goto out_err;
}
cfg->sched_escan = true;
@@ -3512,15 +3312,14 @@ out_err:
#ifndef CONFIG_BRCMISCAN
static int brcmf_dev_pno_clean(struct net_device *ndev)
{
- char iovbuf[128];
int ret;
/* Disable pfn */
- ret = brcmf_dev_intvar_set(ndev, "pfn", 0);
+ ret = brcmf_fil_iovar_int_set(netdev_priv(ndev), "pfn", 0);
if (ret == 0) {
/* clear pfn */
- ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0,
- iovbuf, sizeof(iovbuf));
+ ret = brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfnclear",
+ NULL, 0);
}
if (ret < 0)
WL_ERR("failed code %d\n", ret);
@@ -3531,7 +3330,6 @@ static int brcmf_dev_pno_clean(struct net_device *ndev)
static int brcmf_dev_pno_config(struct net_device *ndev)
{
struct brcmf_pno_param_le pfn_param;
- char iovbuf[128];
memset(&pfn_param, 0, sizeof(pfn_param));
pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
@@ -3544,9 +3342,8 @@ static int brcmf_dev_pno_config(struct net_device *ndev)
/* set up pno scan fr */
pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
- return brcmf_dev_iovar_setbuf(ndev, "pfn_set",
- &pfn_param, sizeof(pfn_param),
- iovbuf, sizeof(iovbuf));
+ return brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfn_set",
+ &pfn_param, sizeof(pfn_param));
}
static int
@@ -3554,7 +3351,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_sched_scan_request *request)
{
- char iovbuf[128];
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_pno_net_param_le pfn;
int i;
@@ -3562,8 +3359,8 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
WL_SCAN("Enter n_match_sets:%d n_ssids:%d\n",
request->n_match_sets, request->n_ssids);
- if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ WL_ERR("Scanning already: status (%lu)\n", cfg->scan_status);
return -EAGAIN;
}
@@ -3620,15 +3417,14 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
- ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add",
- &pfn, sizeof(pfn),
- iovbuf, sizeof(iovbuf));
+ ret = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn,
+ sizeof(pfn));
WL_SCAN(">>> PNO filter %s for ssid (%s)\n",
ret == 0 ? "set" : "failed",
ssid->ssid);
}
/* Enable the PNO */
- if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) {
+ if (brcmf_fil_iovar_int_set(ifp, "pfn", 1) < 0) {
WL_ERR("PNO enable failed!! ret=%d\n", ret);
return -EINVAL;
}
@@ -3656,12 +3452,20 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg->wdev->netdev;
+ struct net_device *ndev = cfg_to_ndev(cfg);
struct brcmf_dcmd *dcmd = data;
struct sk_buff *reply;
int ret;
- ret = brcmf_netlink_dcmd(ndev, dcmd);
+ WL_TRACE("cmd %x set %d buf %p len %d\n", dcmd->cmd, dcmd->set,
+ dcmd->buf, dcmd->len);
+
+ if (dcmd->set)
+ ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), dcmd->cmd,
+ dcmd->buf, dcmd->len);
+ else
+ ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), dcmd->cmd,
+ dcmd->buf, dcmd->len);
if (ret == 0) {
reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
@@ -3673,23 +3477,23 @@ static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
/* set auth */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0);
if (err < 0) {
WL_ERR("auth error %d\n", err);
return err;
}
/* set wsec */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", 0);
if (err < 0) {
WL_ERR("wsec error %d\n", err);
return err;
}
/* set upper-layer auth */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth",
- WPA_AUTH_NONE, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", WPA_AUTH_NONE);
if (err < 0) {
WL_ERR("wpa_auth error %d\n", err);
return err;
@@ -3710,6 +3514,7 @@ static s32
brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
bool is_rsn_ie, s32 bssidx)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
u16 count;
s32 err = 0;
@@ -3850,8 +3655,8 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
wme_bss_disable = 0;
}
/* set wme_bss_disable to sync RSN Capabilities */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable",
- wme_bss_disable, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable",
+ wme_bss_disable);
if (err < 0) {
WL_ERR("wme_bss_disable error %d\n", err);
goto exit;
@@ -3861,19 +3666,19 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
wsec = (pval | gval | SES_OW_ENABLED);
/* set auth */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "auth", auth);
if (err < 0) {
WL_ERR("auth error %d\n", err);
goto exit;
}
/* set wsec */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
if (err < 0) {
WL_ERR("wsec error %d\n", err);
goto exit;
}
/* set upper-layer auth */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth);
if (err < 0) {
WL_ERR("wpa_auth error %d\n", err);
goto exit;
@@ -3963,17 +3768,19 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
return ie_len + VNDR_IE_HDR_SIZE;
}
-s32
+static s32
brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev, s32 bssidx, s32 pktflag,
+ struct net_device *ndev, s32 pktflag,
u8 *vndr_ie_buf, u32 vndr_ie_len)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct vif_saved_ie *saved_ie = &ifp->vif->saved_ie;
s32 err = 0;
u8 *iovar_ie_buf;
u8 *curr_ie_buf;
u8 *mgmt_ie_buf = NULL;
int mgmt_ie_buf_len;
- u32 *mgmt_ie_len = 0;
+ u32 *mgmt_ie_len;
u32 del_add_ie_buf_len = 0;
u32 total_ie_buf_len = 0;
u32 parsed_ie_buf_len = 0;
@@ -3984,31 +3791,29 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
u8 *ptr;
int remained_buf_len;
- WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
+ WL_TRACE("bssidx %d, pktflag : 0x%02X\n",
+ brcmf_ndev_bssidx(ndev), pktflag);
iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
if (!iovar_ie_buf)
return -ENOMEM;
curr_ie_buf = iovar_ie_buf;
- if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) ||
- test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
+ if (ifp->vif->mode == WL_MODE_AP) {
switch (pktflag) {
case VNDR_IE_PRBRSP_FLAG:
- mgmt_ie_buf = cfg->ap_info->probe_res_ie;
- mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
- mgmt_ie_buf_len =
- sizeof(cfg->ap_info->probe_res_ie);
+ mgmt_ie_buf = saved_ie->probe_res_ie;
+ mgmt_ie_len = &saved_ie->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
break;
case VNDR_IE_BEACON_FLAG:
- mgmt_ie_buf = cfg->ap_info->beacon_ie;
- mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
- mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+ mgmt_ie_buf = saved_ie->beacon_ie;
+ mgmt_ie_len = &saved_ie->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
break;
default:
err = -EPERM;
WL_ERR("not suitable type\n");
goto exit;
}
- bssidx = 0;
} else {
err = -EPERM;
WL_ERR("not suitable type\n");
@@ -4104,11 +3909,8 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
}
}
if (total_ie_buf_len) {
- err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie",
- iovar_ie_buf,
- total_ie_buf_len,
- cfg->extra_buf,
- WL_EXTRA_BUF_MAX, bssidx);
+ err = brcmf_fil_bsscfg_data_set(ifp, "vndr_ie", iovar_ie_buf,
+ total_ie_buf_len);
if (err)
WL_ERR("vndr ie set error : %d\n", err);
}
@@ -4123,9 +3925,9 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
{
s32 ie_offset;
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_tlv *ssid_ie;
struct brcmf_ssid_le ssid_le;
- s32 ioctl_value;
s32 err = -EPERM;
struct brcmf_tlv *rsn_ie;
struct brcmf_vs_tlv *wpa_ie;
@@ -4140,7 +3942,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
settings->ssid, settings->ssid_len, settings->auth_type,
settings->inactivity_timeout);
- if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) {
+ if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) {
WL_ERR("Not in AP creation mode\n");
return -EPERM;
}
@@ -4164,20 +3966,17 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_set_mpc(ndev, 0);
- ioctl_value = 1;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0) {
WL_ERR("BRCMF_C_DOWN error %d\n", err);
goto exit;
}
- ioctl_value = 1;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
WL_ERR("SET INFRA error %d\n", err);
goto exit;
}
- ioctl_value = 1;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
if (err < 0) {
WL_ERR("setting AP mode failed %d\n", err);
goto exit;
@@ -4226,7 +4025,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
cfg->ap_info->security_mode = false;
}
/* Set Beacon IEs to FW */
- err = brcmf_set_management_ie(cfg, ndev, bssidx,
+ err = brcmf_set_management_ie(cfg, ndev,
VNDR_IE_BEACON_FLAG,
(u8 *)settings->beacon.tail,
settings->beacon.tail_len);
@@ -4236,7 +4035,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
WL_TRACE("Applied Vndr IEs for Beacon\n");
/* Set Probe Response IEs to FW */
- err = brcmf_set_management_ie(cfg, ndev, bssidx,
+ err = brcmf_set_management_ie(cfg, ndev,
VNDR_IE_PRBRSP_FLAG,
(u8 *)settings->beacon.proberesp_ies,
settings->beacon.proberesp_ies_len);
@@ -4246,25 +4045,22 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
WL_TRACE("Applied Vndr IEs for Probe Resp\n");
if (settings->beacon_interval) {
- ioctl_value = settings->beacon_interval;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD,
- &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
+ settings->beacon_interval);
if (err < 0) {
WL_ERR("Beacon Interval Set Error, %d\n", err);
goto exit;
}
}
if (settings->dtim_period) {
- ioctl_value = settings->dtim_period;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD,
- &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
+ settings->dtim_period);
if (err < 0) {
WL_ERR("DTIM Interval Set Error, %d\n", err);
goto exit;
}
}
- ioctl_value = 1;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0) {
WL_ERR("BRCMF_C_UP error (%d)\n", err);
goto exit;
@@ -4274,14 +4070,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
/* join parameters starts with ssid */
memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
/* create softap */
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
- sizeof(join_params));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, sizeof(join_params));
if (err < 0) {
WL_ERR("SET SSID error (%d)\n", err);
goto exit;
}
- clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
- set_bit(WL_STATUS_AP_CREATED, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+ set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
exit:
if (err)
@@ -4291,8 +4087,8 @@ exit:
static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- s32 ioctl_value;
s32 err = -EPERM;
WL_TRACE("Enter\n");
@@ -4301,21 +4097,20 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
- ioctl_value = 0;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev),
+ BRCMF_C_SET_AP, 0);
if (err < 0) {
WL_ERR("setting AP mode failed %d\n", err);
goto exit;
}
- ioctl_value = 0;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_UP, 0);
if (err < 0) {
WL_ERR("BRCMF_C_UP error %d\n", err);
goto exit;
}
brcmf_set_mpc(ndev, 1);
- clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
- clear_bit(WL_STATUS_AP_CREATED, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
}
exit:
return err;
@@ -4326,6 +4121,7 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
u8 *mac)
{
struct brcmf_scb_val_le scbval;
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
if (!mac)
@@ -4333,13 +4129,13 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
WL_TRACE("Enter %pM\n", mac);
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
memcpy(&scbval.ea, mac, ETH_ALEN);
scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
- &scbval, sizeof(scbval));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
+ &scbval, sizeof(scbval));
if (err)
WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
@@ -4410,72 +4206,97 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
#endif
}
-static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev)
+static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
{
- struct wireless_dev *wdev;
+ struct wiphy *wiphy;
s32 err = 0;
- wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
- if (!wdev)
- return ERR_PTR(-ENOMEM);
-
- wdev->wiphy = wiphy_new(&wl_cfg80211_ops,
- sizeof(struct brcmf_cfg80211_info));
- if (!wdev->wiphy) {
+ wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
+ if (!wiphy) {
WL_ERR("Could not allocate wiphy device\n");
- err = -ENOMEM;
- goto wiphy_new_out;
- }
- set_wiphy_dev(wdev->wiphy, ndev);
- wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
- wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP);
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
+ return ERR_PTR(-ENOMEM);
+ }
+ set_wiphy_dev(wiphy, phydev);
+ wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+ wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
* it as 11a by default.
* This will be updated with
* 11n phy tables in
* "ifconfig up"
* if phy has 11n capability
*/
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- wdev->wiphy->cipher_suites = __wl_cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
- wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->cipher_suites = __wl_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
* save mode
* by default
*/
- brcmf_wiphy_pno_params(wdev->wiphy);
- err = wiphy_register(wdev->wiphy);
+ brcmf_wiphy_pno_params(wiphy);
+ err = wiphy_register(wiphy);
if (err < 0) {
WL_ERR("Could not register wiphy device (%d)\n", err);
- goto wiphy_register_out;
+ wiphy_free(wiphy);
+ return ERR_PTR(err);
+ }
+ return wiphy;
+}
+
+static
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+ struct net_device *netdev,
+ s32 mode, bool pm_block)
+{
+ struct brcmf_cfg80211_vif *vif;
+
+ if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
+ return ERR_PTR(-ENOSPC);
+
+ vif = kzalloc(sizeof(*vif), GFP_KERNEL);
+ if (!vif)
+ return ERR_PTR(-ENOMEM);
+
+ vif->wdev.wiphy = cfg->wiphy;
+ vif->wdev.netdev = netdev;
+ vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode);
+
+ if (netdev) {
+ vif->ifp = netdev_priv(netdev);
+ netdev->ieee80211_ptr = &vif->wdev;
+ SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy));
}
- return wdev;
-wiphy_register_out:
- wiphy_free(wdev->wiphy);
+ vif->mode = mode;
+ vif->pm_block = pm_block;
+ vif->roam_off = -1;
-wiphy_new_out:
- kfree(wdev);
+ brcmf_init_prof(&vif->profile);
- return ERR_PTR(err);
+ list_add_tail(&vif->list, &cfg->vif_list);
+ cfg->vif_cnt++;
+ return vif;
}
-static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg)
+static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
{
- struct wireless_dev *wdev = cfg->wdev;
+ struct brcmf_cfg80211_info *cfg;
+ struct wiphy *wiphy;
- if (!wdev) {
- WL_ERR("wdev is invalid\n");
- return;
+ wiphy = vif->wdev.wiphy;
+ cfg = wiphy_priv(wiphy);
+ list_del(&vif->list);
+ cfg->vif_cnt--;
+
+ kfree(vif);
+ if (!cfg->vif_cnt) {
+ wiphy_unregister(wiphy);
+ wiphy_free(wiphy);
}
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
- cfg->wdev = NULL;
}
static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg,
@@ -4541,7 +4362,7 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
{
- struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
u32 req_len;
@@ -4550,8 +4371,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
brcmf_clear_assoc_ies(cfg);
- err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf,
- WL_ASSOC_INFO_MAX);
+ err = brcmf_fil_iovar_data_get(ifp, "assoc_info",
+ cfg->extra_buf, WL_ASSOC_INFO_MAX);
if (err) {
WL_ERR("could not get assoc info (%d)\n", err);
return err;
@@ -4561,9 +4382,9 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
req_len = le32_to_cpu(assoc_info->req_len);
resp_len = le32_to_cpu(assoc_info->resp_len);
if (req_len) {
- err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
- cfg->extra_buf,
- WL_ASSOC_INFO_MAX);
+ err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
+ cfg->extra_buf,
+ WL_ASSOC_INFO_MAX);
if (err) {
WL_ERR("could not get assoc req (%d)\n", err);
return err;
@@ -4577,9 +4398,9 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
conn_info->req_ie = NULL;
}
if (resp_len) {
- err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
- cfg->extra_buf,
- WL_ASSOC_INFO_MAX);
+ err = brcmf_fil_iovar_data_get(ifp, "assoc_resp_ies",
+ cfg->extra_buf,
+ WL_ASSOC_INFO_MAX);
if (err) {
WL_ERR("could not get assoc resp (%d)\n", err);
return err;
@@ -4603,7 +4424,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e)
{
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
struct wiphy *wiphy = cfg_to_wiphy(cfg);
struct ieee80211_channel *notify_channel = NULL;
@@ -4628,7 +4450,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
/* data sent to dongle has to be little endian */
*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+ buf, WL_BSS_INFO_MAX);
if (err)
goto done;
@@ -4652,7 +4475,7 @@ done:
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
WL_CONN("Report roaming result\n");
- set_bit(WL_STATUS_CONNECTED, &cfg->status);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
WL_TRACE("Exit\n");
return err;
}
@@ -4662,13 +4485,15 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev, const struct brcmf_event_msg *e,
bool completed)
{
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
s32 err = 0;
WL_TRACE("Enter\n");
- if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) {
+ if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state)) {
if (completed) {
brcmf_get_assoc_ies(cfg);
memcpy(profile->bssid, e->addr, ETH_ALEN);
@@ -4684,7 +4509,8 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
WLAN_STATUS_AUTH_TIMEOUT,
GFP_KERNEL);
if (completed)
- set_bit(WL_STATUS_CONNECTED, &cfg->status);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state);
WL_CONN("Report connect result - connection %s\n",
completed ? "succeeded" : "failed");
}
@@ -4736,7 +4562,8 @@ brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
s32 err = 0;
if (cfg->conf->mode == WL_MODE_AP) {
@@ -4747,30 +4574,34 @@ brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
memcpy(profile->bssid, e->addr, ETH_ALEN);
wl_inform_ibss(cfg, ndev, e->addr);
cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
- set_bit(WL_STATUS_CONNECTED, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state);
} else
brcmf_bss_connect_done(cfg, ndev, e, true);
} else if (brcmf_is_linkdown(cfg, e)) {
WL_CONN("Linkdown\n");
if (brcmf_is_ibssmode(cfg)) {
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
- if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &cfg->status))
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state);
+ if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state))
brcmf_link_down(cfg);
} else {
brcmf_bss_connect_done(cfg, ndev, e, false);
- if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &cfg->status)) {
+ if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
cfg80211_disconnected(ndev, 0, NULL, 0,
- GFP_KERNEL);
+ GFP_KERNEL);
brcmf_link_down(cfg);
}
}
- brcmf_init_prof(cfg->profile);
+ brcmf_init_prof(ndev_to_prof(ndev));
} else if (brcmf_is_nonetwork(cfg, e)) {
if (brcmf_is_ibssmode(cfg))
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state);
else
brcmf_bss_connect_done(cfg, ndev, e, false);
}
@@ -4783,12 +4614,13 @@ brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
u32 event = be32_to_cpu(e->event_type);
u32 status = be32_to_cpu(e->status);
if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
- if (test_bit(WL_STATUS_CONNECTED, &cfg->status))
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
brcmf_bss_roaming_done(cfg, ndev, e);
else
brcmf_bss_connect_done(cfg, ndev, e, true);
@@ -4821,6 +4653,7 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_channel_info_le channel_inform_le;
struct brcmf_scan_results_le *bss_list_le;
u32 len = WL_SCAN_BUF_MAX;
@@ -4835,15 +4668,16 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
}
- if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
WL_ERR("Scan complete while device not scanning\n");
scan_abort = true;
err = -EINVAL;
goto scan_done_out;
}
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_inform_le,
- sizeof(channel_inform_le));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL,
+ &channel_inform_le,
+ sizeof(channel_inform_le));
if (err) {
WL_ERR("scan busy (%d)\n", err);
scan_abort = true;
@@ -4857,8 +4691,8 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
memset(cfg->scan_results, 0, len);
bss_list_le->buflen = cpu_to_le32(len);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
- cfg->scan_results, len);
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_SCAN_RESULTS,
+ cfg->scan_results, len);
if (err) {
WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
err = -EINVAL;
@@ -4920,8 +4754,6 @@ static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
cfg->bss_info = NULL;
kfree(cfg->conf);
cfg->conf = NULL;
- kfree(cfg->profile);
- cfg->profile = NULL;
kfree(cfg->scan_req_int);
cfg->scan_req_int = NULL;
kfree(cfg->escan_ioctl_buf);
@@ -4950,9 +4782,6 @@ static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
if (!cfg->conf)
goto init_priv_mem_out;
- cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
- if (!cfg->profile)
- goto init_priv_mem_out;
cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (!cfg->bss_info)
goto init_priv_mem_out;
@@ -5129,7 +4958,6 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
return err;
brcmf_init_escan(cfg);
brcmf_init_conf(cfg->conf);
- brcmf_init_prof(cfg->profile);
brcmf_link_down(cfg);
return err;
@@ -5145,12 +4973,14 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
brcmf_deinit_priv_mem(cfg);
}
-struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
- struct device *busdev,
- struct brcmf_pub *drvr)
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr)
{
- struct wireless_dev *wdev;
+ struct net_device *ndev = drvr->iflist[0]->ndev;
+ struct device *busdev = drvr->dev;
struct brcmf_cfg80211_info *cfg;
+ struct wiphy *wiphy;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_if *ifp;
s32 err = 0;
if (!ndev) {
@@ -5158,35 +4988,45 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
return NULL;
}
- wdev = brcmf_alloc_wdev(busdev);
- if (IS_ERR(wdev)) {
+ ifp = netdev_priv(ndev);
+ wiphy = brcmf_setup_wiphy(busdev);
+ if (IS_ERR(wiphy))
return NULL;
- }
- wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
- cfg = wdev_to_cfg(wdev);
- cfg->wdev = wdev;
+ cfg = wiphy_priv(wiphy);
+ cfg->wiphy = wiphy;
cfg->pub = drvr;
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- wdev->netdev = ndev;
+ INIT_LIST_HEAD(&cfg->vif_list);
+
+ vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false);
+ if (IS_ERR(vif)) {
+ wiphy_free(wiphy);
+ return NULL;
+ }
+
err = wl_init_priv(cfg);
if (err) {
WL_ERR("Failed to init iwm_priv (%d)\n", err);
goto cfg80211_attach_out;
}
+ ifp->vif = vif;
return cfg;
cfg80211_attach_out:
- brcmf_free_wdev(cfg);
+ brcmf_free_vif(vif);
return NULL;
}
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
{
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_cfg80211_vif *tmp;
+
wl_deinit_priv(cfg);
- brcmf_free_wdev(cfg);
+ list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
+ brcmf_free_vif(vif);
+ }
}
void
@@ -5202,22 +5042,18 @@ brcmf_cfg80211_event(struct net_device *ndev,
static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
{
- /* Room for "event_msgs" + '\0' + bitvec */
- s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
s32 err = 0;
WL_TRACE("Enter\n");
/* Setup event_msgs */
- brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf));
+ err = brcmf_fil_iovar_data_get(netdev_priv(ndev), "event_msgs",
+ eventmask, BRCMF_EVENTING_MASK_LEN);
if (err) {
WL_ERR("Get event_msgs error (%d)\n", err);
goto dongle_eventmsg_out;
}
- memcpy(eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
setbit(eventmask, BRCMF_E_SET_SSID);
setbit(eventmask, BRCMF_E_ROAM);
@@ -5241,9 +5077,8 @@ static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
setbit(eventmask, BRCMF_E_ESCAN_RESULT);
setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
- brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
+ err = brcmf_fil_iovar_data_set(netdev_priv(ndev), "event_msgs",
+ eventmask, BRCMF_EVENTING_MASK_LEN);
if (err) {
WL_ERR("Set event_msgs error (%d)\n", err);
goto dongle_eventmsg_out;
@@ -5257,23 +5092,17 @@ dongle_eventmsg_out:
static s32
brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
{
- s8 iovbuf[32];
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
__le32 roamtrigger[2];
__le32 roam_delta[2];
- __le32 bcn_to_le;
- __le32 roamvar_le;
/*
* Setup timeout if Beacons are lost and roam is
* off to report link down
*/
if (roamvar) {
- bcn_to_le = cpu_to_le32(bcn_timeout);
- brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_to_le,
- sizeof(bcn_to_le), iovbuf, sizeof(iovbuf));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
+ err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
if (err) {
WL_ERR("bcn_timeout error (%d)\n", err);
goto dongle_rom_out;
@@ -5285,10 +5114,7 @@ brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
* to take care of roaming
*/
WL_INFO("Internal Roaming = %s\n", roamvar ? "Off" : "On");
- roamvar_le = cpu_to_le32(roamvar);
- brcmf_c_mkiovar("roam_off", (char *)&roamvar_le,
- sizeof(roamvar_le), iovbuf, sizeof(iovbuf));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
+ err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
if (err) {
WL_ERR("roam_off error (%d)\n", err);
goto dongle_rom_out;
@@ -5296,8 +5122,8 @@ brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL);
roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_TRIGGER,
- (void *)roamtrigger, sizeof(roamtrigger));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
+ (void *)roamtrigger, sizeof(roamtrigger));
if (err) {
WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
goto dongle_rom_out;
@@ -5305,8 +5131,8 @@ brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_DELTA,
- (void *)roam_delta, sizeof(roam_delta));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
+ (void *)roam_delta, sizeof(roam_delta));
if (err) {
WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err);
goto dongle_rom_out;
@@ -5320,13 +5146,11 @@ static s32
brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
s32 scan_unassoc_time, s32 scan_passive_time)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
- __le32 scan_assoc_tm_le = cpu_to_le32(scan_assoc_time);
- __le32 scan_unassoc_tm_le = cpu_to_le32(scan_unassoc_time);
- __le32 scan_passive_tm_le = cpu_to_le32(scan_passive_time);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME,
- &scan_assoc_tm_le, sizeof(scan_assoc_tm_le));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ scan_assoc_time);
if (err) {
if (err == -EOPNOTSUPP)
WL_INFO("Scan assoc time is not supported\n");
@@ -5334,8 +5158,8 @@ brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
WL_ERR("Scan assoc time error (%d)\n", err);
goto dongle_scantime_out;
}
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME,
- &scan_unassoc_tm_le, sizeof(scan_unassoc_tm_le));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ scan_unassoc_time);
if (err) {
if (err == -EOPNOTSUPP)
WL_INFO("Scan unassoc time is not supported\n");
@@ -5344,8 +5168,8 @@ brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
goto dongle_scantime_out;
}
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME,
- &scan_passive_tm_le, sizeof(scan_passive_tm_le));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_PASSIVE_TIME,
+ scan_passive_time);
if (err) {
if (err == -EOPNOTSUPP)
WL_INFO("Scan passive time is not supported\n");
@@ -5360,13 +5184,14 @@ dongle_scantime_out:
static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
{
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct wiphy *wiphy;
s32 phy_list;
s8 phy;
s32 err = 0;
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST,
- &phy_list, sizeof(phy_list));
+ err = brcmf_fil_cmd_data_get(ifp, BRCM_GET_PHYLIST,
+ &phy_list, sizeof(phy_list));
if (err) {
WL_ERR("error (%d)\n", err);
return err;
@@ -5408,7 +5233,8 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
goto default_conf_out;
power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PM,
+ power_mode);
if (err)
goto default_conf_out;
WL_INFO("power save set to %s\n",
@@ -5436,47 +5262,12 @@ default_conf_out:
}
-static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg)
-{
- char buf[10+IFNAMSIZ];
- struct dentry *fd;
- s32 err = 0;
-
- sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
- cfg->debugfsdir = debugfs_create_dir(buf,
- cfg_to_wiphy(cfg)->debugfsdir);
-
- fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
- (u16 *)&cfg->profile->beacon_interval);
- if (!fd) {
- err = -ENOMEM;
- goto err_out;
- }
-
- fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
- (u8 *)&cfg->profile->dtim_period);
- if (!fd) {
- err = -ENOMEM;
- goto err_out;
- }
-
-err_out:
- return err;
-}
-
-static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg)
-{
- debugfs_remove_recursive(cfg->debugfsdir);
- cfg->debugfsdir = NULL;
-}
-
static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
{
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
s32 err = 0;
- set_bit(WL_STATUS_READY, &cfg->status);
-
- brcmf_debugfs_add_netdev_params(cfg);
+ set_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
err = brcmf_config_dongle(cfg);
if (err)
@@ -5489,13 +5280,16 @@ static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
{
+ struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+
/*
* While going down, if associated with AP disassociate
* from AP to save power
*/
- if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
- test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
- test_bit(WL_STATUS_READY, &cfg->status)) {
+ if ((test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state) ||
+ test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) &&
+ check_vif_up(ifp->vif)) {
WL_INFO("Disassociating from AP");
brcmf_link_down(cfg);
@@ -5507,9 +5301,7 @@ static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
}
brcmf_abort_scanning(cfg);
- clear_bit(WL_STATUS_READY, &cfg->status);
-
- brcmf_debugfs_remove_netdev(cfg);
+ clear_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 71ced17..1dd96f1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -127,15 +127,15 @@ do { \
#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
#define IE_MAX_LEN 512
-/* dongle status */
-enum wl_status {
- WL_STATUS_READY,
- WL_STATUS_SCANNING,
- WL_STATUS_SCAN_ABORTING,
- WL_STATUS_CONNECTING,
- WL_STATUS_CONNECTED,
- WL_STATUS_AP_CREATING,
- WL_STATUS_AP_CREATED
+/**
+ * enum brcmf_scan_status - dongle scan status
+ *
+ * @BRCMF_SCAN_STATUS_BUSY: scanning in progress on dongle.
+ * @BRCMF_SCAN_STATUS_ABORT: scan being aborted on dongle.
+ */
+enum brcmf_scan_status {
+ BRCMF_SCAN_STATUS_BUSY,
+ BRCMF_SCAN_STATUS_ABORT,
};
/* wi-fi mode */
@@ -145,19 +145,6 @@ enum wl_mode {
WL_MODE_AP
};
-/* dongle profile list */
-enum wl_prof_list {
- WL_PROF_MODE,
- WL_PROF_SSID,
- WL_PROF_SEC,
- WL_PROF_IBSS,
- WL_PROF_BAND,
- WL_PROF_BSSID,
- WL_PROF_ACT,
- WL_PROF_BEACONINT,
- WL_PROF_DTIMPERIOD
-};
-
/* dongle iscan state */
enum wl_iscan_state {
WL_ISCAN_STATE_IDLE,
@@ -214,25 +201,73 @@ struct brcmf_cfg80211_security {
u32 wpa_auth;
};
-/* ibss information for currently joined ibss network */
-struct brcmf_cfg80211_ibss {
- u8 beacon_interval; /* in millisecond */
- u8 atim; /* in millisecond */
- s8 join_only;
- u8 band;
- u8 channel;
-};
-
-/* dongle profile */
+/**
+ * struct brcmf_cfg80211_profile - profile information.
+ *
+ * @ssid: ssid of associated/associating ap.
+ * @bssid: bssid of joined/joining ibss.
+ * @sec: security information.
+ */
struct brcmf_cfg80211_profile {
- u32 mode;
struct brcmf_ssid ssid;
u8 bssid[ETH_ALEN];
- u16 beacon_interval;
- u8 dtim_period;
struct brcmf_cfg80211_security sec;
- struct brcmf_cfg80211_ibss ibss;
- s32 band;
+};
+
+/**
+ * enum brcmf_vif_status - bit indices for vif status.
+ *
+ * @BRCMF_VIF_STATUS_READY: ready for operation.
+ * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
+ * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
+ * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
+ * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
+ */
+enum brcmf_vif_status {
+ BRCMF_VIF_STATUS_READY,
+ BRCMF_VIF_STATUS_CONNECTING,
+ BRCMF_VIF_STATUS_CONNECTED,
+ BRCMF_VIF_STATUS_AP_CREATING,
+ BRCMF_VIF_STATUS_AP_CREATED
+};
+
+/**
+ * struct vif_saved_ie - holds saved IEs for a virtual interface.
+ *
+ * @probe_res_ie: IE info for probe response.
+ * @beacon_ie: IE info for beacon frame.
+ * @probe_res_ie_len: IE info length for probe response.
+ * @beacon_ie_len: IE info length for beacon frame.
+ */
+struct vif_saved_ie {
+ u8 probe_res_ie[IE_MAX_LEN];
+ u8 beacon_ie[IE_MAX_LEN];
+ u32 probe_res_ie_len;
+ u32 beacon_ie_len;
+};
+
+/**
+ * struct brcmf_cfg80211_vif - virtual interface specific information.
+ *
+ * @ifp: lower layer interface pointer
+ * @wdev: wireless device.
+ * @profile: profile information.
+ * @mode: operating mode.
+ * @roam_off: roaming state.
+ * @sme_state: SME state using enum brcmf_vif_status bits.
+ * @pm_block: power-management blocked.
+ * @list: linked list.
+ */
+struct brcmf_cfg80211_vif {
+ struct brcmf_if *ifp;
+ struct wireless_dev wdev;
+ struct brcmf_cfg80211_profile profile;
+ s32 mode;
+ s32 roam_off;
+ unsigned long sme_state;
+ bool pm_block;
+ struct vif_saved_ie saved_ie;
+ struct list_head list;
};
/* dongle iscan event loop */
@@ -383,7 +418,7 @@ struct brcmf_pno_scanresults_le {
/**
* struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
*
- * @wdev: representing wl cfg80211 device.
+ * @wiphy: wiphy object for cfg80211 interface.
* @conf: dongle configuration.
* @scan_request: cfg80211 scan request object.
* @el: main event loop.
@@ -395,12 +430,11 @@ struct brcmf_pno_scanresults_le {
* @scan_req_int: internal scan request object.
* @bss_info: bss information for cfg80211 layer.
* @ie: information element object for internal purpose.
- * @profile: holding dongle profile.
* @iscan: iscan controller information.
* @conn_info: association info.
* @pmk_list: wpa2 pmk list.
* @event_work: event handler work struct.
- * @status: current dongle status.
+ * @scan_status: scan activity on the dongle.
* @pub: common driver information.
* @channel: current channel.
* @iscan_on: iscan on/off switch.
@@ -422,10 +456,11 @@ struct brcmf_pno_scanresults_le {
* @escan_timeout_work: scan timeout worker.
* @escan_ioctl_buf: dongle command buffer for escan commands.
* @ap_info: host ap information.
- * @ci: used to link this structure to netdev private data.
+ * @vif_list: linked list of vif instances.
+ * @vif_cnt: number of vif instances.
*/
struct brcmf_cfg80211_info {
- struct wireless_dev *wdev;
+ struct wiphy *wiphy;
struct brcmf_cfg80211_conf *conf;
struct cfg80211_scan_request *scan_request;
struct brcmf_cfg80211_event_loop el;
@@ -437,12 +472,11 @@ struct brcmf_cfg80211_info {
struct brcmf_cfg80211_scan_req *scan_req_int;
struct wl_cfg80211_bss_info *bss_info;
struct brcmf_cfg80211_ie ie;
- struct brcmf_cfg80211_profile *profile;
struct brcmf_cfg80211_iscan_ctrl *iscan;
struct brcmf_cfg80211_connect_info conn_info;
struct brcmf_cfg80211_pmk_list *pmk_list;
struct work_struct event_work;
- unsigned long status;
+ unsigned long scan_status;
struct brcmf_pub *pub;
u32 channel;
bool iscan_on;
@@ -464,11 +498,13 @@ struct brcmf_cfg80211_info {
struct work_struct escan_timeout_work;
u8 *escan_ioctl_buf;
struct ap_info *ap_info;
+ struct list_head vif_list;
+ u8 vif_cnt;
};
-static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w)
+static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
{
- return w->wdev->wiphy;
+ return cfg->wiphy;
}
static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
@@ -481,9 +517,12 @@ static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
}
-static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
+static inline
+struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
{
- return cfg->wdev->netdev;
+ struct brcmf_cfg80211_vif *vif;
+ vif = list_first_entry(&cfg->vif_list, struct brcmf_cfg80211_vif, list);
+ return vif->wdev.netdev;
}
static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
@@ -491,6 +530,12 @@ static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
return wdev_to_cfg(ndev->ieee80211_ptr);
}
+static inline struct brcmf_cfg80211_profile *ndev_to_prof(struct net_device *nd)
+{
+ struct brcmf_if *ifp = netdev_priv(nd);
+ return &ifp->vif->profile;
+}
+
#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data))
#define cfg_to_iscan(w) (w->iscan)
@@ -500,9 +545,7 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
return &cfg->conn_info;
}
-struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
- struct device *busdev,
- struct brcmf_pub *drvr);
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr);
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
/* event handler from dongle */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index b89f127..de96290 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -692,7 +692,7 @@ void ai_pci_up(struct si_pub *sih)
sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
- bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
+ bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true);
}
/* Unconfigure and/or apply various WARs when going down */
@@ -703,7 +703,7 @@ void ai_pci_down(struct si_pub *sih)
sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
- bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
+ bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false);
}
/* Enable BT-COEX & Ex-PA for 4313 */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 75086b3..565c15a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -5077,7 +5077,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
* Configure pci/pcmcia here instead of in brcms_c_attach()
* to allow mfg hotswap: down, hotswap (chip power cycle), up.
*/
- bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci, wlc_hw->d11core,
+ bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core,
true);
/*
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index df7050a..d39e3e2 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -415,7 +415,7 @@ static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
ssid = pos + 2;
ssid_len = pos[1];
break;
- case WLAN_EID_GENERIC:
+ case WLAN_EID_VENDOR_SPECIFIC:
if (pos[1] >= 4 &&
pos[2] == 0x00 && pos[3] == 0x50 &&
pos[4] == 0xf2 && pos[5] == 1) {
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 02e0579..95a1ca1 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1108,7 +1108,7 @@ static const char *get_info_element_string(u16 id)
MFIE_STRING(ERP_INFO);
MFIE_STRING(RSN);
MFIE_STRING(EXT_SUPP_RATES);
- MFIE_STRING(GENERIC);
+ MFIE_STRING(VENDOR_SPECIFIC);
MFIE_STRING(QOS_PARAMETER);
default:
return "UNKNOWN";
@@ -1248,8 +1248,8 @@ static int libipw_parse_info_param(struct libipw_info_element
LIBIPW_DEBUG_MGMT("WLAN_EID_CHALLENGE: ignored\n");
break;
- case WLAN_EID_GENERIC:
- LIBIPW_DEBUG_MGMT("WLAN_EID_GENERIC: %d bytes\n",
+ case WLAN_EID_VENDOR_SPECIFIC:
+ LIBIPW_DEBUG_MGMT("WLAN_EID_VENDOR_SPECIFIC: %d bytes\n",
info_element->len);
if (!libipw_parse_qos_info_param_IE(info_element,
network))
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 408132c..30e761d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1191,8 +1191,6 @@ static void iwl_option_config(struct iwl_priv *priv)
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
- u16 radio_cfg;
-
priv->eeprom_data->sku = priv->eeprom_data->sku;
if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
@@ -1208,8 +1206,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
- radio_cfg = priv->eeprom_data->radio_cfg;
-
priv->hw_params.tx_chains_num =
num_of_ant(priv->eeprom_data->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
@@ -1334,6 +1330,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
/* Configure transport layer */
iwl_trans_configure(priv->trans, &trans_cfg);
+ trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+
/* At this point both hw and priv are allocated. */
SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
@@ -2152,8 +2151,6 @@ static int __init iwl_init(void)
{
int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
ret = iwlagn_rate_control_register();
if (ret) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 59a5f78..678717b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -25,6 +25,39 @@
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include "iwl-trans.h"
+#if !defined(__IWLWIFI_DEVICE_TRACE)
+static inline bool iwl_trace_data(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (ieee80211_is_data(hdr->frame_control))
+ return skb->protocol != cpu_to_be16(ETH_P_PAE);
+ return false;
+}
+
+static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
+ void *rxbuf, size_t len)
+{
+ struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
+ struct ieee80211_hdr *hdr;
+
+ if (cmd->cmd != trans->rx_mpdu_cmd)
+ return len;
+
+ hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
+ trans->rx_mpdu_cmd_hdr_size);
+ if (!ieee80211_is_data(hdr->frame_control))
+ return len;
+ /* maybe try to identify EAPOL frames? */
+ return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
+ ieee80211_hdrlen(hdr->frame_control);
+}
+#endif
+
#define __IWLWIFI_DEVICE_TRACE
#include <linux/tracepoint.h>
@@ -235,6 +268,48 @@ TRACE_EVENT(iwlwifi_dbg,
);
#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_data
+
+TRACE_EVENT(iwlwifi_dev_tx_data,
+ TP_PROTO(const struct device *dev,
+ struct sk_buff *skb,
+ void *data, size_t data_len),
+ TP_ARGS(dev, skb, data, data_len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ if (iwl_trace_data(skb))
+ memcpy(__get_dynamic_array(data), data, data_len);
+ ),
+ TP_printk("[%s] TX frame data", __get_str(dev))
+);
+
+TRACE_EVENT(iwlwifi_dev_rx_data,
+ TP_PROTO(const struct device *dev,
+ const struct iwl_trans *trans,
+ void *rxbuf, size_t len),
+ TP_ARGS(dev, trans, rxbuf, len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __dynamic_array(u8, data,
+ len - iwl_rx_trace_len(trans, rxbuf, len))
+ ),
+ TP_fast_assign(
+ size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
+ DEV_ASSIGN;
+ if (offs < len)
+ memcpy(__get_dynamic_array(data),
+ ((u8 *)rxbuf) + offs, len - offs);
+ ),
+ TP_printk("[%s] TX frame data", __get_str(dev))
+);
+
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
@@ -270,25 +345,28 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(const struct device *dev, void *rxbuf, size_t len),
- TP_ARGS(dev, rxbuf, len),
+ TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
+ void *rxbuf, size_t len),
+ TP_ARGS(dev, trans, rxbuf, len),
TP_STRUCT__entry(
DEV_ENTRY
- __dynamic_array(u8, rxbuf, len)
+ __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
),
TP_fast_assign(
DEV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+ memcpy(__get_dynamic_array(rxbuf), rxbuf,
+ iwl_rx_trace_len(trans, rxbuf, len));
),
TP_printk("[%s] RX cmd %#.2x",
__get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
);
TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(const struct device *dev, void *tfd, size_t tfdlen,
+ TP_PROTO(const struct device *dev, struct sk_buff *skb,
+ void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
- TP_ARGS(dev, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
TP_STRUCT__entry(
DEV_ENTRY
@@ -301,14 +379,15 @@ TRACE_EVENT(iwlwifi_dev_tx,
* for the possible padding).
*/
__dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, buf1_len)
+ __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->framelen = buf0_len + buf1_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+ if (!iwl_trace_data(skb))
+ memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
),
TP_printk("[%s] TX %.2x (%zu bytes)",
__get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3dfebfb..54c41b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -327,11 +327,11 @@ u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
+ const void *buf, int dwords)
{
unsigned long flags;
int offs, result = 0;
- u32 *vals = buf;
+ const u32 *vals = buf;
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 50d3819..e1aa69f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -87,7 +87,7 @@ void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
} while (0)
int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
+ const void *buf, int dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 9253ef1..c3a4bb4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -213,6 +213,9 @@
#define SCD_CONTEXT_QUEUE_OFFSET(x)\
(SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
+#define SCD_TX_STTS_QUEUE_OFFSET(x)\
+ (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
+
#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index ff11542..f75ea6d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -444,6 +444,10 @@ enum iwl_trans_state {
* @dev_cmd_headroom: room needed for the transport's private use before the
* device_cmd for Tx - for internal use only
* The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
+ * starting the firmware, used for tracing
+ * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
+ * start of the 802.11 header in the @rx_mpdu_cmd
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -457,6 +461,8 @@ struct iwl_trans {
u32 hw_id;
char hw_id_str[52];
+ u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+
bool pm_support;
wait_queue_head_t wait_command_queue;
@@ -516,6 +522,8 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
{
might_sleep();
+ WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+
return trans->ops->start_fw(trans, fw);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index bb69f8f..41d821f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -419,7 +419,8 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+ trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
+ trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index fe0fffd..f95d88d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -300,7 +300,7 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
u32 scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
u8 buf[16];
int i;
@@ -1385,11 +1385,13 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
- trace_iwlwifi_dev_tx(trans->dev,
+ trace_iwlwifi_dev_tx(trans->dev, skb,
&txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, firstlen,
skb->data + hdr_len, secondlen);
+ trace_iwlwifi_dev_tx_data(trans->dev, skb,
+ skb->data + hdr_len, secondlen);
/* start timer if queue currently empty */
if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1514,14 +1516,13 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
/* n_bd is usually 256 => n_bd - 1 = 0xff */
int tfd_num = ssn & (txq->q.n_bd - 1);
- int freed = 0;
spin_lock(&txq->lock);
if (txq->q.read_ptr != tfd_num) {
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
txq_id, txq->q.read_ptr, tfd_num, ssn);
- freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
+ iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
if (iwl_queue_space(&txq->q) > txq->q.low_mark)
iwl_wake_queue(trans, txq);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 105e3af..db3efbb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -480,21 +480,20 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 rd_ptr, wr_ptr;
- int n_bd = trans_pcie->txq[txq_id].q.n_bd;
+ u32 stts_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq_id);
+ static const u32 zero_val[4] = {};
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d not used", txq_id);
return;
}
- rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
- wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
+ iwl_txq_set_inactive(trans, txq_id);
- WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
- txq_id, rd_ptr, wr_ptr);
+ _iwl_write_targ_mem_dwords(trans, stts_addr,
+ zero_val, ARRAY_SIZE(zero_val));
- iwl_txq_set_inactive(trans, txq_id);
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -549,7 +548,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
* allocated into separate TFDs, then we will need to
* increase the size of the buffers.
*/
- if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
+ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+ "Command %s (%#x) is too large (%d bytes)\n",
+ trans_pcie_get_cmd_string(trans_pcie, cmd->id),
+ cmd->id, copy_size))
return -EINVAL;
spin_lock_bh(&txq->lock);
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 9780775..3e81264 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -101,7 +101,7 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
switch (action) {
case CMD_ACT_MESH_CONFIG_START:
- ie->id = WLAN_EID_GENERIC;
+ ie->id = WLAN_EID_VENDOR_SPECIFIC;
ie->val.oui[0] = 0x00;
ie->val.oui[1] = 0x50;
ie->val.oui[2] = 0x43;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 9402b93..4a97acd 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -58,8 +58,7 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
else
- mwifiex_process_rx_packet(priv->adapter,
- rx_tmp_ptr);
+ mwifiex_process_rx_packet(priv, rx_tmp_ptr);
}
}
@@ -106,7 +105,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
else
- mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+ mwifiex_process_rx_packet(priv, rx_tmp_ptr);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -442,8 +441,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
mwifiex_handle_uap_rx_forward(priv, payload);
else
- mwifiex_process_rx_packet(priv->adapter,
- payload);
+ mwifiex_process_rx_packet(priv, payload);
}
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 780d3e1..fdb1eb8 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -471,13 +471,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
flag = 1;
first_chan = (u32) ch->hw_value;
next_chan = first_chan;
- max_pwr = ch->max_reg_power;
+ max_pwr = ch->max_power;
no_of_parsed_chan = 1;
continue;
}
if (ch->hw_value == next_chan + 1 &&
- ch->max_reg_power == max_pwr) {
+ ch->max_power == max_pwr) {
next_chan++;
no_of_parsed_chan++;
} else {
@@ -488,7 +488,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
no_of_triplet++;
first_chan = (u32) ch->hw_value;
next_chan = first_chan;
- max_pwr = ch->max_reg_power;
+ max_pwr = ch->max_power;
no_of_parsed_chan = 1;
}
}
@@ -1819,12 +1819,18 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
- if (atomic_read(&priv->wmm.tx_pkts_queued) >=
+ if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+ atomic_read(&priv->wmm.tx_pkts_queued) >=
MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
return -EBUSY;
}
+ if (priv->user_scan_cfg) {
+ dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
+ return -EBUSY;
+ }
+
priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
GFP_KERNEL);
if (!priv->user_scan_cfg) {
@@ -2116,7 +2122,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
}
sema_init(&priv->async_sem, 1);
- priv->scan_pending_on_block = false;
dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
@@ -2253,8 +2258,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
- wiphy->features = NL80211_FEATURE_HT_IBSS |
- NL80211_FEATURE_INACTIVITY_TIMER;
+ wiphy->features |= NL80211_FEATURE_HT_IBSS |
+ NL80211_FEATURE_INACTIVITY_TIMER |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN;
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 8d46510..da6c491 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -917,21 +917,24 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
dev_err(adapter->dev, "last_cmd_index = %d\n",
adapter->dbg.last_cmd_index);
- print_hex_dump_bytes("last_cmd_id: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_id, DBG_CMD_NUM);
- print_hex_dump_bytes("last_cmd_act: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_act, DBG_CMD_NUM);
+ dev_err(adapter->dev, "last_cmd_id: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_id),
+ adapter->dbg.last_cmd_id);
+ dev_err(adapter->dev, "last_cmd_act: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_act),
+ adapter->dbg.last_cmd_act);
dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
adapter->dbg.last_cmd_resp_index);
- print_hex_dump_bytes("last_cmd_resp_id: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_resp_id,
- DBG_CMD_NUM);
+ dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_resp_id),
+ adapter->dbg.last_cmd_resp_id);
dev_err(adapter->dev, "last_event_index = %d\n",
adapter->dbg.last_event_index);
- print_hex_dump_bytes("last_event: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_event, DBG_CMD_NUM);
+ dev_err(adapter->dev, "last_event: %*ph\n",
+ (int)sizeof(adapter->dbg.last_event),
+ adapter->dbg.last_event);
dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
adapter->data_sent, adapter->cmd_sent);
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index b5d37a8..482faac 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -84,18 +84,19 @@ static void scan_delay_timer_fn(unsigned long data)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
if (priv->user_scan_cfg) {
- dev_dbg(priv->adapter->dev,
- "info: %s: scan aborted\n", __func__);
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
+ if (priv->scan_request) {
+ dev_dbg(priv->adapter->dev,
+ "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: scan already aborted\n");
+ }
+
kfree(priv->user_scan_cfg);
priv->user_scan_cfg = NULL;
}
-
- if (priv->scan_pending_on_block) {
- priv->scan_pending_on_block = false;
- up(&priv->async_sem);
- }
goto done;
}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index eb22dd2..1df767b 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -472,6 +472,14 @@ mwifiex_open(struct net_device *dev)
static int
mwifiex_close(struct net_device *dev)
{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (priv->scan_request) {
+ dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index c2d0ab1..81f8772 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -115,8 +115,6 @@ enum {
#define MWIFIEX_TYPE_DATA 0
#define MWIFIEX_TYPE_EVENT 3
-#define DBG_CMD_NUM 5
-
#define MAX_BITMAP_RATES_SIZE 10
#define MAX_CHANNEL_BAND_BG 14
@@ -484,7 +482,6 @@ struct mwifiex_private {
u8 nick_name[16];
u16 current_key_index;
struct semaphore async_sem;
- u8 scan_pending_on_block;
u8 report_scan_result;
struct cfg80211_scan_request *scan_request;
struct mwifiex_user_scan_cfg *user_scan_cfg;
@@ -750,9 +747,9 @@ int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
-int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
+int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
-int mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
int mwifiex_process_event(struct mwifiex_adapter *adapter);
@@ -809,7 +806,7 @@ void mwifiex_hs_activated_event(struct mwifiex_private *priv,
u8 activated);
int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no,
u16 cmd_action, u32 cmd_oid,
@@ -819,9 +816,9 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
void *data_buf, void *cmd_buf);
int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
struct host_cmd_ds_command *resp);
-int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
+int mwifiex_process_sta_rx_packet(struct mwifiex_private *,
struct sk_buff *skb);
-int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9171aae..9189a32 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -153,7 +153,7 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
if (((bss_desc->bcn_wpa_ie) &&
((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
- WLAN_EID_WPA))) {
+ WLAN_EID_VENDOR_SPECIFIC))) {
iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
@@ -202,7 +202,7 @@ mwifiex_is_bss_no_sec(struct mwifiex_private *priv,
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
- WLAN_EID_WPA)) &&
+ WLAN_EID_VENDOR_SPECIFIC)) &&
((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
WLAN_EID_RSN)) &&
@@ -237,7 +237,8 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv,
{
if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
- ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id == WLAN_EID_WPA))
+ ((*(bss_desc->bcn_wpa_ie)).
+ vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC))
/*
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
@@ -309,7 +310,8 @@ mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv,
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled &&
((!bss_desc->bcn_wpa_ie) ||
- ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) &&
+ ((*(bss_desc->bcn_wpa_ie)).
+ vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
!priv->sec_info.encryption_mode && bss_desc->privacy) {
@@ -329,7 +331,8 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled &&
((!bss_desc->bcn_wpa_ie) ||
- ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) &&
+ ((*(bss_desc->bcn_wpa_ie)).
+ vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
priv->sec_info.encryption_mode && bss_desc->privacy) {
@@ -938,6 +941,11 @@ mwifiex_config_scan(struct mwifiex_private *priv,
chan_idx)->chan_scan_mode_bitmap
&= ~MWIFIEX_PASSIVE_SCAN;
+ if (*filtered_scan)
+ (scan_chan_list +
+ chan_idx)->chan_scan_mode_bitmap
+ |= MWIFIEX_DISABLE_CHAN_FILT;
+
if (user_scan_in->chan_list[chan_idx].scan_time) {
scan_dur = (u16) user_scan_in->
chan_list[chan_idx].scan_time;
@@ -1759,26 +1767,39 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
if (priv->report_scan_result)
priv->report_scan_result = false;
- if (priv->scan_pending_on_block) {
- priv->scan_pending_on_block = false;
- up(&priv->async_sem);
- }
if (priv->user_scan_cfg) {
- dev_dbg(priv->adapter->dev,
- "info: %s: sending scan results\n", __func__);
- cfg80211_scan_done(priv->scan_request, 0);
- priv->scan_request = NULL;
+ if (priv->scan_request) {
+ dev_dbg(priv->adapter->dev,
+ "info: notifying scan done\n");
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: scan already aborted\n");
+ }
+
kfree(priv->user_scan_cfg);
priv->user_scan_cfg = NULL;
}
} else {
- if (!mwifiex_wmm_lists_empty(adapter)) {
+ if (priv->user_scan_cfg && !priv->scan_request) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
+ mod_timer(&priv->scan_delay_timer, jiffies);
+ dev_dbg(priv->adapter->dev,
+ "info: %s: triggerring scan abort\n", __func__);
+ } else if (!mwifiex_wmm_lists_empty(adapter) &&
+ (priv->scan_request && (priv->scan_request->flags &
+ NL80211_SCAN_FLAG_LOW_PRIORITY))) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
adapter->scan_delay_cnt = 1;
mod_timer(&priv->scan_delay_timer, jiffies +
msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ dev_dbg(priv->adapter->dev,
+ "info: %s: deferring scan\n", __func__);
} else {
/* Get scan command from scan_pending_q and put to
cmd_pending_q */
@@ -1891,7 +1912,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
__func__);
return -1;
}
- priv->scan_pending_on_block = true;
priv->adapter->scan_wait_q_woken = false;
@@ -1905,10 +1925,7 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
if (!ret)
ret = mwifiex_wait_queue_complete(priv->adapter);
- if (ret == -1) {
- priv->scan_pending_on_block = false;
- up(&priv->async_sem);
- }
+ up(&priv->async_sem);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 09e6a26..65c12eb 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -85,10 +85,6 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
if (priv->report_scan_result)
priv->report_scan_result = false;
- if (priv->scan_pending_on_block) {
- priv->scan_pending_on_block = false;
- up(&priv->async_sem);
- }
break;
case HostCmd_CMD_MAC_CONTROL:
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 0c9f70b..552d72e 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -713,7 +713,7 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
priv->wpa_ie_len, priv->wpa_ie[0]);
- if (priv->wpa_ie[0] == WLAN_EID_WPA) {
+ if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
priv->sec_info.wpa_enabled = true;
} else if (priv->wpa_ie[0] == WLAN_EID_RSN) {
priv->sec_info.wpa2_enabled = true;
@@ -1253,7 +1253,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
}
pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
/* Test to see if it is a WPA IE, if not, then it is a gen IE */
- if (((pvendor_ie->element_id == WLAN_EID_WPA) &&
+ if (((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
(!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui)))) ||
(pvendor_ie->element_id == WLAN_EID_RSN)) {
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 07d32b7..b5c1095 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -38,14 +38,10 @@
*
* The completion callback is called after processing in complete.
*/
-int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct sk_buff *skb)
{
int ret;
- struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
- struct mwifiex_private *priv =
- mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
- rx_info->bss_type);
struct rx_packet_hdr *rx_pkt_hdr;
struct rxpd *local_rx_pd;
int hdr_chop;
@@ -98,9 +94,9 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
priv->rxpd_htinfo = local_rx_pd->ht_info;
- ret = mwifiex_recv_packet(adapter, skb);
+ ret = mwifiex_recv_packet(priv, skb);
if (ret == -1)
- dev_err(adapter->dev, "recv packet failed\n");
+ dev_err(priv->adapter->dev, "recv packet failed\n");
return ret;
}
@@ -117,21 +113,15 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
*
* The completion callback is called after processing in complete.
*/
-int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
struct sk_buff *skb)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
struct rxpd *local_rx_pd;
- struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
struct rx_packet_hdr *rx_pkt_hdr;
u8 ta[ETH_ALEN];
u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
- struct mwifiex_private *priv =
- mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
- rx_info->bss_type);
-
- if (!priv)
- return -1;
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
@@ -169,13 +159,13 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
while (!skb_queue_empty(&list)) {
rx_skb = __skb_dequeue(&list);
- ret = mwifiex_recv_packet(adapter, rx_skb);
+ ret = mwifiex_recv_packet(priv, rx_skb);
if (ret == -1)
dev_err(adapter->dev, "Rx of A-MSDU failed");
}
return 0;
} else if (rx_pkt_type == PKT_TYPE_MGMT) {
- ret = mwifiex_process_mgmt_packet(adapter, skb);
+ ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
dev_err(adapter->dev, "Rx of mgmt packet failed");
dev_kfree_skb_any(skb);
@@ -188,7 +178,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
*/
if (!IS_11N_ENABLED(priv) ||
memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
- mwifiex_process_rx_packet(adapter, skb);
+ mwifiex_process_rx_packet(priv, skb);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 2af2639..5cb3f7a 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -48,13 +48,19 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
if (!priv)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ if (!priv) {
+ dev_err(adapter->dev, "data: priv not found. Drop RX packet\n");
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
+
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- return mwifiex_process_uap_rx_packet(adapter, skb);
+ return mwifiex_process_uap_rx_packet(priv, skb);
- return mwifiex_process_sta_rx_packet(adapter, skb);
+ return mwifiex_process_sta_rx_packet(priv, skb);
}
EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index d95a2d5..8dd7224 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -188,10 +188,19 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u8 *var_pos = params->beacon.head + var_offset;
int len = params->beacon.head_len - var_offset;
+ u8 rate_len = 0;
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
- if (rate_ie)
+ if (rate_ie) {
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
+ rate_len = rate_ie->len;
+ }
+
+ rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
+ params->beacon.tail,
+ params->beacon.tail_len);
+ if (rate_ie)
+ memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
return;
}
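For context on the uap_cmd.c hunk above: cfg80211_find_ie() scans a buffer of information elements and returns a pointer to the first element with the requested ID, or NULL if none is present. A minimal sketch of the lookup, with a hypothetical helper and buffer names that are not part of the patch:

#include <net/cfg80211.h>
#include <linux/string.h>

/* Hypothetical helper: copy the extended supported rates IE payload, if any. */
static u8 copy_ext_rates(const u8 *ies, int ies_len, u8 *dst)
{
	const u8 *ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies, ies_len);

	if (!ie)
		return 0;
	/* ie[0] is the element ID, ie[1] the payload length, ie + 2 the data */
	memcpy(dst, ie + 2, ie[1]);
	return ie[1];
}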
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 0966ac2..a018e42 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -146,7 +146,7 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
}
/* Forward unicat/Inter-BSS packets to kernel. */
- return mwifiex_process_rx_packet(adapter, skb);
+ return mwifiex_process_rx_packet(priv, skb);
}
/*
@@ -159,24 +159,17 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
*
* The completion callback is called after processing is complete.
*/
-int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
struct sk_buff *skb)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct uap_rxpd *uap_rx_pd;
- struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
struct rx_packet_hdr *rx_pkt_hdr;
u16 rx_pkt_type;
u8 ta[ETH_ALEN], pkt_type;
struct mwifiex_sta_node *node;
- struct mwifiex_private *priv =
- mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
- rx_info->bss_type);
-
- if (!priv)
- return -1;
-
uap_rx_pd = (struct uap_rxpd *)(skb->data);
rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -210,7 +203,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
while (!skb_queue_empty(&list)) {
rx_skb = __skb_dequeue(&list);
- ret = mwifiex_recv_packet(adapter, rx_skb);
+ ret = mwifiex_recv_packet(priv, rx_skb);
if (ret)
dev_err(adapter->dev,
"AP:Rx A-MSDU failed");
@@ -218,7 +211,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
return 0;
} else if (rx_pkt_type == PKT_TYPE_MGMT) {
- ret = mwifiex_process_mgmt_packet(adapter, skb);
+ ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
dev_err(adapter->dev, "Rx of mgmt packet failed");
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index ae88f80..0982375 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -146,20 +146,16 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
* to the kernel.
*/
int
-mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
struct sk_buff *skb)
{
struct rxpd *rx_pd;
- struct mwifiex_private *priv;
u16 pkt_len;
if (!skb)
return -1;
rx_pd = (struct rxpd *)skb->data;
- priv = mwifiex_get_priv_by_id(adapter, rx_pd->bss_num, rx_pd->bss_type);
- if (!priv)
- return -1;
skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
skb_pull(skb, sizeof(pkt_len));
@@ -190,20 +186,11 @@ mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
* the function creates a blank SKB, fills it with the data from the
* received buffer and then sends this new SKB to the kernel.
*/
-int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
+int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
{
- struct mwifiex_rxinfo *rx_info;
- struct mwifiex_private *priv;
-
if (!skb)
return -1;
- rx_info = MWIFIEX_SKB_RXCB(skb);
- priv = mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
- rx_info->bss_type);
- if (!priv)
- return -1;
-
skb->dev = priv->netdev;
skb->protocol = eth_type_trans(skb, priv->netdev);
skb->ip_summed = CHECKSUM_NONE;
@@ -225,7 +212,7 @@ int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
* fragments. Currently we fail the Filesndl-ht.scr script
* for UDP, hence this fix
*/
- if ((adapter->iface_type == MWIFIEX_USB) &&
+ if ((priv->adapter->iface_type == MWIFIEX_USB) &&
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
index 4dadf98..5a8fec2 100644
--- a/drivers/net/wireless/orinoco/main.h
+++ b/drivers/net/wireless/orinoco/main.h
@@ -39,7 +39,7 @@ static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
{
u8 *p = data;
while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
- if ((p[0] == WLAN_EID_GENERIC) &&
+ if ((p[0] == WLAN_EID_VENDOR_SPECIFIC) &&
(memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
return p;
p += p[1] + 2;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 7f53cea2..01624dc 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -865,7 +865,7 @@ static int ezusb_firmware_download(struct ezusb_priv *upriv,
static int ezusb_access_ltv(struct ezusb_priv *upriv,
struct request_context *ctx,
u16 length, const void *data, u16 frame_type,
- void *ans_buff, int ans_size, u16 *ans_length)
+ void *ans_buff, unsigned ans_size, u16 *ans_length)
{
int req_size;
int retval = 0;
@@ -933,7 +933,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
}
if (ctx->in_rid) {
struct ezusb_packet *ans = ctx->buf;
- int exp_len;
+ unsigned exp_len;
if (ans->hermes_len != 0)
exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
@@ -949,8 +949,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
}
if (ans_buff)
- memcpy(ans_buff, ans->data,
- min_t(int, exp_len, ans_size));
+ memcpy(ans_buff, ans->data, min(exp_len, ans_size));
if (ans_length)
*ans_length = le16_to_cpu(ans->hermes_len);
}
@@ -995,7 +994,7 @@ static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
struct ezusb_priv *upriv = hw->priv;
struct request_context *ctx;
- if ((bufsize < 0) || (bufsize % 2))
+ if (bufsize % 2)
return -EINVAL;
ctx = ezusb_alloc_ctx(upriv, rid, rid);
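A note on the ans_size/exp_len signedness change above: the kernel's min() macro warns at build time when its two arguments have different types, which is why the old code had to use min_t(int, ...). With both lengths unsigned, plain min() can be used. A tiny sketch with made-up values and a hypothetical function name:

#include <linux/kernel.h>

static unsigned copy_len_example(void)	/* hypothetical, not part of the patch */
{
	unsigned exp_len = 42;		/* bytes available in the answer */
	unsigned ans_size = 16;		/* bytes the caller's buffer can hold */

	/* Same (unsigned) types, so min() is fine and evaluates to 16 here.
	 * A signed/unsigned mix would need min_t(unsigned, a, b) instead. */
	return min(exp_len, ans_size);
}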
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 59474ae..c0441a7 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2520,20 +2520,37 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
return comp_value;
}
+static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
+ int power_level, int max_power)
+{
+ int delta;
+
+ if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
+ return 0;
+
+ /*
+ * XXX: We don't know the maximum transmit power of our hardware since
+ * the EEPROM doesn't expose it. We only know that we are calibrated
+ * to 100% tx power.
+ *
+ * Hence, we assume the regulatory limit that cfg80211 calculated for
+ * the current channel is our maximum and if we are requested to lower
+ * the value we just reduce our tx power accordingly.
+ */
+ delta = power_level - max_power;
+ return min(delta, 0);
+}
+
static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
enum ieee80211_band band, int power_level,
u8 txpower, int delta)
{
- u32 reg;
u16 eeprom;
u8 criterion;
u8 eirp_txpower;
u8 eirp_txpower_criterion;
u8 reg_limit;
- if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
- return txpower;
-
if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
/*
* Check if eirp txpower exceed txpower_limit.
@@ -2542,11 +2559,13 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
* .11b data rate need add additional 4dbm
* when calculating eirp txpower.
*/
- rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
- criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1,
+ &eeprom);
+ criterion = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_BYRATE_RATE0);
- rt2x00_eeprom_read(rt2x00dev,
- EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
+ &eeprom);
if (band == IEEE80211_BAND_2GHZ)
eirp_txpower_criterion = rt2x00_get_field16(eeprom,
@@ -2563,36 +2582,71 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
} else
reg_limit = 0;
- return txpower + delta - reg_limit;
+ txpower = max(0, txpower + delta - reg_limit);
+ return min_t(u8, txpower, 0xc);
}
+/*
+ * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
+ * the BBP R1 register. The TX_PWR_CFG_X registers allow per-rate TX power
+ * values to be configured, 4 bits for each rate (tunable from 0 to 15 dBm).
+ * BBP R1 controls transmit power for all rates, but allows only 4 discrete
+ * values: -12, -6, 0 and 6 dBm. Reference per-rate transmit power values are
+ * located in the EEPROM at the EEPROM_TXPOWER_BYRATE offset. We adjust them
+ * and the BBP R1 setting according to current conditions (i.e. band,
+ * bandwidth, temperature, user settings).
+ */
static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
- enum ieee80211_band band,
+ struct ieee80211_channel *chan,
int power_level)
{
- u8 txpower;
+ u8 txpower, r1;
u16 eeprom;
- int i, is_rate_b;
- u32 reg;
- u8 r1;
- u32 offset;
- int delta;
+ u32 reg, offset;
+ int i, is_rate_b, delta, power_ctrl;
+ enum ieee80211_band band = chan->band;
/*
- * Calculate HT40 compensation delta
+ * Calculate HT40 compensation. For 40MHz we need to add or subtract
+ * value read from EEPROM (different for 2GHz and for 5GHz).
*/
delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);
/*
- * calculate temperature compensation delta
+ * Calculate temperature compensation. Depending on the measurement of the
+ * current TSSI (Transmitter Signal Strength Indication) we know whether TX
+ * power (due to temperature or maybe other factors) is smaller or bigger
+ * than expected. We adjust it based on the TSSI reference and boundary
+ * values provided in the EEPROM.
*/
delta += rt2800_get_gain_calibration_delta(rt2x00dev);
/*
- * set to normal bbp tx power control mode: +/- 0dBm
+ * Decrease power according to the user settings, on devices with unknown
+ * maximum tx power. For other devices we take the user power_level into
+ * consideration in rt2800_compensate_txpower().
+ */
+ delta += rt2800_get_txpower_reg_delta(rt2x00dev, power_level,
+ chan->max_power);
+
+ /*
+ * BBP_R1 controls TX power for all rates; it allows the following gains to
+ * be set: -12, -6, 0, +6 dBm, by writing the values 2, 1, 0, 3 respectively.
+ *
+ * TODO: we do not use the +6 dBm option so as not to increase power beyond
+ * the regulatory limit, however this could be utilized for devices with
+ * CAPABILITY_POWER_LIMIT.
*/
rt2800_bbp_read(rt2x00dev, 1, &r1);
- rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0);
+ if (delta <= -12) {
+ power_ctrl = 2;
+ delta += 12;
+ } else if (delta <= -6) {
+ power_ctrl = 1;
+ delta += 6;
+ } else {
+ power_ctrl = 0;
+ }
+ rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
rt2800_bbp_write(rt2x00dev, 1, r1);
offset = TX_PWR_CFG_0;
@@ -2710,7 +2764,7 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
{
- rt2800_config_txpower(rt2x00dev, rt2x00dev->curr_band,
+ rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.channel,
rt2x00dev->tx_power);
}
EXPORT_SYMBOL_GPL(rt2800_gain_calibration);
@@ -2845,11 +2899,11 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
rt2800_config_channel(rt2x00dev, libconf->conf,
&libconf->rf, &libconf->channel);
- rt2800_config_txpower(rt2x00dev, libconf->conf->channel->band,
+ rt2800_config_txpower(rt2x00dev, libconf->conf->channel,
libconf->conf->power_level);
}
if (flags & IEEE80211_CONF_CHANGE_POWER)
- rt2800_config_txpower(rt2x00dev, libconf->conf->channel->band,
+ rt2800_config_txpower(rt2x00dev, libconf->conf->channel,
libconf->conf->power_level);
if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
rt2800_config_retry_limit(rt2x00dev, libconf);
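To make the BBP R1 / TX_PWR_CFG split in rt2800_config_txpower() above concrete, here is a small standalone sketch of the same delta-splitting step with an example value; the function name is hypothetical and it mirrors the hunk rather than adding new behaviour:

/* Hypothetical illustration of the coarse/fine power split above. */
static void split_power_delta(int delta, int *power_ctrl, int *rate_delta)
{
	if (delta <= -12) {
		*power_ctrl = 2;	/* BBP R1: -12 dB coarse step */
		delta += 12;
	} else if (delta <= -6) {
		*power_ctrl = 1;	/* BBP R1: -6 dB coarse step */
		delta += 6;
	} else {
		*power_ctrl = 0;	/* BBP R1: 0 dB, no coarse step */
	}
	*rate_delta = delta;	/* remainder applied to TX_PWR_CFG_X fields */
}

/* Example: delta = -15  ->  power_ctrl = 2 (-12 dB), rate_delta = -3 */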
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 5b4b4d4..ca69e35 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -52,11 +52,8 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
u32 target_content = 0;
u8 entry_i;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "key_cont_128:\n %x:%x:%x:%x:%x:%x\n",
- key_cont_128[0], key_cont_128[1],
- key_cont_128[2], key_cont_128[3],
- key_cont_128[4], key_cont_128[5]);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "key_cont_128: %6phC\n",
+ key_cont_128);
for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 1ca4e25..1cdf5a2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -43,8 +43,8 @@
#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \
((RTLPRIV(_priv))->mac80211.opmode == \
NL80211_IFTYPE_ADHOC) ? \
- ((RTLPRIV(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \
- ((RTLPRIV(_priv))->dm.undecorated_smoothed_pwdb)
+ ((RTLPRIV(_priv))->dm.entry_min_undec_sm_pwdb) : \
+ ((RTLPRIV(_priv))->dm.undec_sm_pwdb)
static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
0x7f8001fe,
@@ -167,18 +167,18 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
dm_digtable->cur_igvalue = 0x20;
dm_digtable->pre_igvalue = 0x0;
- dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
- dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
- dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+ dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+ dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
+ dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
dm_digtable->rx_gain_range_max = DM_DIG_MAX;
dm_digtable->rx_gain_range_min = DM_DIG_MIN;
- dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
- dm_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
- dm_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+ dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
}
@@ -189,22 +189,21 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
long rssi_val_min = 0;
- if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
- (dm_digtable->cursta_connectstate == DIG_STA_CONNECT)) {
- if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
+ if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
+ (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) {
+ if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
rssi_val_min =
- (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
- rtlpriv->dm.undecorated_smoothed_pwdb) ?
- rtlpriv->dm.undecorated_smoothed_pwdb :
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ (rtlpriv->dm.entry_min_undec_sm_pwdb >
+ rtlpriv->dm.undec_sm_pwdb) ?
+ rtlpriv->dm.undec_sm_pwdb :
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
else
- rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
- } else if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT ||
- dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT) {
- rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
- } else if (dm_digtable->curmultista_connectstate ==
- DIG_MULTISTA_CONNECT) {
- rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
+ } else if (dm_digtable->cursta_cstate == DIG_STA_CONNECT ||
+ dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT) {
+ rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
+ } else if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
+ rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
}
return (u8) rssi_val_min;
@@ -286,37 +285,33 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+ struct dig_t *digtable = &rtlpriv->dm_digtable;
- if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable->fa_highthresh) {
- if ((dm_digtable->backoff_val - 2) <
- dm_digtable->backoff_val_range_min)
- dm_digtable->backoff_val =
- dm_digtable->backoff_val_range_min;
+ if (rtlpriv->falsealm_cnt.cnt_all > digtable->fa_highthresh) {
+ if ((digtable->back_val - 2) < digtable->back_range_min)
+ digtable->back_val = digtable->back_range_min;
else
- dm_digtable->backoff_val -= 2;
- } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable->fa_lowthresh) {
- if ((dm_digtable->backoff_val + 2) >
- dm_digtable->backoff_val_range_max)
- dm_digtable->backoff_val =
- dm_digtable->backoff_val_range_max;
+ digtable->back_val -= 2;
+ } else if (rtlpriv->falsealm_cnt.cnt_all < digtable->fa_lowthresh) {
+ if ((digtable->back_val + 2) > digtable->back_range_max)
+ digtable->back_val = digtable->back_range_max;
else
- dm_digtable->backoff_val += 2;
+ digtable->back_val += 2;
}
- if ((dm_digtable->rssi_val_min + 10 - dm_digtable->backoff_val) >
- dm_digtable->rx_gain_range_max)
- dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_max;
- else if ((dm_digtable->rssi_val_min + 10 -
- dm_digtable->backoff_val) < dm_digtable->rx_gain_range_min)
- dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_min;
+ if ((digtable->rssi_val_min + 10 - digtable->back_val) >
+ digtable->rx_gain_range_max)
+ digtable->cur_igvalue = digtable->rx_gain_range_max;
+ else if ((digtable->rssi_val_min + 10 -
+ digtable->back_val) < digtable->rx_gain_range_min)
+ digtable->cur_igvalue = digtable->rx_gain_range_min;
else
- dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 -
- dm_digtable->backoff_val;
+ digtable->cur_igvalue = digtable->rssi_val_min + 10 -
+ digtable->back_val;
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "rssi_val_min = %x backoff_val %x\n",
- dm_digtable->rssi_val_min, dm_digtable->backoff_val);
+ "rssi_val_min = %x back_val %x\n",
+ digtable->rssi_val_min, digtable->back_val);
rtl92c_dm_write_dig(hw);
}
@@ -327,14 +322,14 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ long rssi_strength = rtlpriv->dm.entry_min_undec_sm_pwdb;
bool multi_sta = false;
if (mac->opmode == NL80211_IFTYPE_ADHOC)
multi_sta = true;
if (!multi_sta ||
- dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
+ dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
initialized = false;
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
return;
@@ -345,7 +340,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
rtl92c_dm_write_dig(hw);
}
- if (dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) {
+ if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
(dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
@@ -367,8 +362,8 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "curmultista_connectstate = %x dig_ext_port_stage %x\n",
- dm_digtable->curmultista_connectstate,
+ "curmultista_cstate = %x dig_ext_port_stage %x\n",
+ dm_digtable->curmultista_cstate,
dm_digtable->dig_ext_port_stage);
}
@@ -378,15 +373,14 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "presta_connectstate = %x, cursta_connectstate = %x\n",
- dm_digtable->presta_connectstate,
- dm_digtable->cursta_connectstate);
+ "presta_cstate = %x, cursta_cstate = %x\n",
+ dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
- if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectstate
- || dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT
- || dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
+ if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
+ dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
+ dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
- if (dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
+ if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
dm_digtable->rssi_val_min =
rtl92c_dm_initial_gain_min_pwdb(hw);
rtl92c_dm_ctrl_initgain_by_rssi(hw);
@@ -394,7 +388,7 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
} else {
dm_digtable->rssi_val_min = 0;
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
- dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
dm_digtable->cur_igvalue = 0x20;
dm_digtable->pre_igvalue = 0;
rtl92c_dm_write_dig(hw);
@@ -407,7 +401,7 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
+ if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
@@ -484,15 +478,15 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
return;
if (mac->link_state >= MAC80211_LINKED)
- dm_digtable->cursta_connectstate = DIG_STA_CONNECT;
+ dm_digtable->cursta_cstate = DIG_STA_CONNECT;
else
- dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
+ dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
rtl92c_dm_initial_gain_sta(hw);
rtl92c_dm_initial_gain_multi_sta(hw);
rtl92c_dm_cck_packet_detection_thresh(hw);
- dm_digtable->presta_connectstate = dm_digtable->cursta_connectstate;
+ dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
}
@@ -526,9 +520,9 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
- dm_digtable->backoff_val);
+ dm_digtable->back_val);
dm_digtable->cur_igvalue += 2;
if (dm_digtable->cur_igvalue > 0x3f)
@@ -555,20 +549,18 @@ static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
return;
if (tmpentry_max_pwdb != 0) {
- rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
- tmpentry_max_pwdb;
+ rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
} else {
- rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+ rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
}
if (tmpentry_min_pwdb != 0xff) {
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
- tmpentry_min_pwdb;
+ rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
} else {
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+ rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
}
- h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+ h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
h2c_parameter[0] = 0;
rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
@@ -1160,7 +1152,7 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rate_adaptive *p_ra = &(rtlpriv->ra);
- u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
+ u32 low_rssi_thresh, high_rssi_thresh;
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
@@ -1179,35 +1171,33 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
mac->opmode == NL80211_IFTYPE_STATION) {
switch (p_ra->pre_ratr_state) {
case DM_RATR_STA_HIGH:
- high_rssithresh_for_ra = 50;
- low_rssithresh_for_ra = 20;
+ high_rssi_thresh = 50;
+ low_rssi_thresh = 20;
break;
case DM_RATR_STA_MIDDLE:
- high_rssithresh_for_ra = 55;
- low_rssithresh_for_ra = 20;
+ high_rssi_thresh = 55;
+ low_rssi_thresh = 20;
break;
case DM_RATR_STA_LOW:
- high_rssithresh_for_ra = 50;
- low_rssithresh_for_ra = 25;
+ high_rssi_thresh = 50;
+ low_rssi_thresh = 25;
break;
default:
- high_rssithresh_for_ra = 50;
- low_rssithresh_for_ra = 20;
+ high_rssi_thresh = 50;
+ low_rssi_thresh = 20;
break;
}
- if (rtlpriv->dm.undecorated_smoothed_pwdb >
- (long)high_rssithresh_for_ra)
+ if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssi_thresh)
p_ra->ratr_state = DM_RATR_STA_HIGH;
- else if (rtlpriv->dm.undecorated_smoothed_pwdb >
- (long)low_rssithresh_for_ra)
+ else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssi_thresh)
p_ra->ratr_state = DM_RATR_STA_MIDDLE;
else
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n",
- rtlpriv->dm.undecorated_smoothed_pwdb);
+ rtlpriv->dm.undec_sm_pwdb);
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
"RSSI_LEVEL = %d\n", p_ra->ratr_state);
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
@@ -1315,7 +1305,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (((mac->link_state == MAC80211_NOLINK)) &&
- (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
dm_pstable->rssi_val_min = 0;
RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
}
@@ -1323,20 +1313,19 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
if (mac->link_state == MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
dm_pstable->rssi_val_min =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
dm_pstable->rssi_val_min);
} else {
- dm_pstable->rssi_val_min =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
dm_pstable->rssi_val_min);
}
} else {
dm_pstable->rssi_val_min =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
@@ -1368,7 +1357,7 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
if (!rtlpriv->dm.dynamic_txpower_enable)
return;
@@ -1379,7 +1368,7 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if ((mac->link_state < MAC80211_LINKED) &&
- (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
@@ -1391,41 +1380,35 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
- if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+ if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
- } else if ((undecorated_smoothed_pwdb <
- (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
- (undecorated_smoothed_pwdb >=
- TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+ } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+ (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
- } else if (undecorated_smoothed_pwdb <
- (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+ } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_NORMAL\n");
@@ -1473,48 +1456,46 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
u8 curr_bt_rssi_state = 0x00;
if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
- undecorated_smoothed_pwdb =
- GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
+ undec_sm_pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
} else {
- if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)
- undecorated_smoothed_pwdb = 100;
+ if (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)
+ undec_sm_pwdb = 100;
else
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
}
/* Check RSSI to determine HighPower/NormalPower state for
* BT coexistence. */
- if (undecorated_smoothed_pwdb >= 67)
+ if (undec_sm_pwdb >= 67)
curr_bt_rssi_state &= (~BT_RSSI_STATE_NORMAL_POWER);
- else if (undecorated_smoothed_pwdb < 62)
+ else if (undec_sm_pwdb < 62)
curr_bt_rssi_state |= BT_RSSI_STATE_NORMAL_POWER;
/* Check RSSI to determine AMPDU setting for BT coexistence. */
- if (undecorated_smoothed_pwdb >= 40)
+ if (undec_sm_pwdb >= 40)
curr_bt_rssi_state &= (~BT_RSSI_STATE_AMDPU_OFF);
- else if (undecorated_smoothed_pwdb <= 32)
+ else if (undec_sm_pwdb <= 32)
curr_bt_rssi_state |= BT_RSSI_STATE_AMDPU_OFF;
/* Marked RSSI state. It will be used to determine BT coexistence
* setting later. */
- if (undecorated_smoothed_pwdb < 35)
+ if (undec_sm_pwdb < 35)
curr_bt_rssi_state |= BT_RSSI_STATE_SPECIAL_LOW;
else
curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
/* Set Tx Power according to BT status. */
- if (undecorated_smoothed_pwdb >= 30)
+ if (undec_sm_pwdb >= 30)
curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW;
- else if (undecorated_smoothed_pwdb < 25)
+ else if (undec_sm_pwdb < 25)
curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
/* Check BT state related to BT_Idle in B/G mode. */
- if (undecorated_smoothed_pwdb < 15)
+ if (undec_sm_pwdb < 15)
curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW;
else
curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index cdcad7d..1d5d360 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -34,9 +34,6 @@
#include "dm_common.h"
#include "phy_common.h"
-/* Define macro to shorten lines */
-#define MCS_TXPWR mcs_txpwrlevel_origoffset
-
u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -138,13 +135,13 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
BIT(8));
if (rfpi_enable)
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
BLSSIREADBACKDATA);
else
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rflssi_readback, retvalue);
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
@@ -290,11 +287,11 @@ void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
else
return;
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][index] = data;
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
rtlphy->pwrgroup_cnt, index,
- rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][index]);
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
if (index == 13)
rtlphy->pwrgroup_cnt++;
@@ -374,14 +371,10 @@ void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
- rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
- RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
- RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
- RFPGA0_XCD_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
- RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
@@ -393,47 +386,33 @@ void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
- ROFDM0_XARXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
- ROFDM0_XBRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
- ROFDM0_XCRXIQIMBANLANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
- ROFDM0_XDRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
- ROFDM0_XATXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
- ROFDM0_XBTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
- ROFDM0_XCTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
- ROFDM0_XDTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
- RFPGA0_XA_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
- RFPGA0_XB_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
- RFPGA0_XC_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
- RFPGA0_XD_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
- TRANSCEIVEA_HSPI_READBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
- TRANSCEIVEB_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
}
EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
@@ -724,6 +703,26 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
+static void _rtl92c_phy_sw_rf_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+ if (channel == 6 && rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20)
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
+ 0x00255);
+ else {
+ u32 backupRF0x1A = (u32)rtl_get_rfreg(hw, RF90_PATH_A,
+ RF_RX_G1, RFREG_OFFSET_MASK);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
+ backupRF0x1A);
+ }
+ }
+}
+
static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
u32 cmdtableidx, u32 cmdtablesz,
enum swchnlcmd_id cmdid,
@@ -837,6 +836,7 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
currentcmd->para1,
RFREG_OFFSET_MASK,
rtlphy->rfreg_chnlval[rfpath]);
+ _rtl92c_phy_sw_rf_setting(hw, channel);
}
break;
default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 2925094..3cfa1bb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -116,6 +116,9 @@
LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
#define CHIP_VER_B BIT(4)
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
+#define CHIP_BONDING_92C_1T2R 0x1
+#define RF_TYPE_1T2R BIT(1)
#define CHIP_92C_BITMASK BIT(0)
#define CHIP_UNKNOWN BIT(7)
#define CHIP_92C_1T2R 0x03
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 27b3af8..74f9c08 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -41,7 +41,7 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
if (!rtlpriv->dm.dynamic_txpower_enable)
return;
@@ -52,7 +52,7 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if ((mac->link_state < MAC80211_LINKED) &&
- (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
@@ -64,41 +64,35 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
- if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+ if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
- } else if ((undecorated_smoothed_pwdb <
- (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
- (undecorated_smoothed_pwdb >=
- TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+ } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+ (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
- } else if (undecorated_smoothed_pwdb <
- (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+ } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_NORMAL\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 86d73b3..d1f34f6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -896,7 +896,6 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- static bool iqk_initialized; /* initialized to false */
bool rtstatus = true;
bool is92c;
int err;
@@ -921,9 +920,28 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
rtlhal->last_hmeboxnum = 0;
rtl92c_phy_mac_config(hw);
+ /* Because the last function modifies RCR, we update the
+ * rcr variable here; otherwise throughput becomes unstable since
+ * receive_config is wrong. RX RCR_ACRC32 makes throughput unstable and
+ * RCR_APP_ICV makes mac80211 disassociate from the Cisco 1252. */
+ rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+ rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
rtl92c_phy_bb_config(hw);
rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
rtl92c_phy_rf_config(hw);
+ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+ !IS_92C_SERIAL(rtlhal->version)) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
+ } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_SYN_G2, MASKDWORD, 0x4F200);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK1, MASKDWORD, 0x44053);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK2, MASKDWORD, 0x80201);
+ }
rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
RF_CHNLBW, RFREG_OFFSET_MASK);
rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
@@ -945,11 +963,11 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
if (ppsc->rfpwr_state == ERFON) {
rtl92c_phy_set_rfpath_switch(hw, 1);
- if (iqk_initialized) {
+ if (rtlphy->iqk_initialized) {
rtl92c_phy_iq_calibrate(hw, true);
} else {
rtl92c_phy_iq_calibrate(hw, false);
- iqk_initialized = true;
+ rtlphy->iqk_initialized = true;
}
rtl92c_dm_check_txpower_tracking(hw);
@@ -1004,6 +1022,13 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) |
CHIP_VENDOR_UMC));
}
+ if (IS_92C_SERIAL(version)) {
+ value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
+ version = (enum version_8192c)(version |
+ ((CHIP_BONDING_IDENTIFIER(value32)
+ == CHIP_BONDING_92C_1T2R) ?
+ RF_TYPE_1T2R : 0));
+ }
}
switch (version) {
@@ -1019,12 +1044,30 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
case VERSION_A_CHIP_88C:
versionid = "A_CHIP_88C";
break;
+ case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
+ versionid = "A_CUT_92C_1T2R";
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
+ versionid = "A_CUT_92C";
+ break;
+ case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
+ versionid = "A_CUT_88C";
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
+ versionid = "B_CUT_92C_1T2R";
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
+ versionid = "B_CUT_92C";
+ break;
+ case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
+ versionid = "B_CUT_88C";
+ break;
default:
versionid = "Unknown. Bug?";
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
"Chip Version ID: %s\n", versionid);
switch (version & 0x3) {
@@ -1197,6 +1240,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 u1b_tmp;
u32 u4b_tmp;
@@ -1225,7 +1269,8 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790);
rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
- rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+ if (!IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
if (rtlpcipriv->bt_coexist.bt_coexistence) {
u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
u4b_tmp |= 0x03824800;
@@ -1254,6 +1299,9 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
_rtl92ce_poweroff_adapter(hw);
+
+ /* After power off we should do IQ calibration (IQK) again */
+ rtlpriv->phy.iqk_initialized = false;
}
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
@@ -1355,9 +1403,9 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
else
tempval = EEPROM_DEFAULT_HT40_2SDIFF;
- rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
(tempval & 0xf);
- rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
((tempval & 0xf0) >> 4);
}
@@ -1381,7 +1429,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
"RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
rf_path, i,
rtlefuse->
- eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]);
+ eprom_chnl_txpwr_ht40_2sdf[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++) {
for (i = 0; i < 14; i++) {
@@ -1396,14 +1444,14 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
if ((rtlefuse->
eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
rtlefuse->
- eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+ eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
> 0) {
rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
rtlefuse->
eeprom_chnlarea_txpwr_ht40_1s[rf_path]
[index] -
rtlefuse->
- eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+ eprom_chnl_txpwr_ht40_2sdf[rf_path]
[index];
} else {
rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
@@ -1912,16 +1960,16 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
ratr_bitmap &= 0x0f0ff0ff;
break;
}
+ sta_entry->ratr_index = ratr_index;
+
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
"ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1], rate_mask[2], rate_mask[3],
- rate_mask[4]);
+ "Rate_index:%x, ratr_val:%x, %5phC\n",
+ ratr_index, ratr_bitmap, rate_mask);
rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
if (macid != 0)
@@ -2176,7 +2224,7 @@ static void rtl8192ce_bt_var_init(struct ieee80211_hw *hw)
if (rtlpcipriv->bt_coexist.reg_bt_iso == 2)
rtlpcipriv->bt_coexist.bt_ant_isolation =
- rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation;
+ rtlpcipriv->bt_coexist.eeprom_bt_ant_isol;
else
rtlpcipriv->bt_coexist.bt_ant_isolation =
rtlpcipriv->bt_coexist.reg_bt_iso;
@@ -2207,23 +2255,22 @@ void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
bool auto_load_fail, u8 *hwinfo)
{
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
- u8 value;
+ u8 val;
if (!auto_load_fail) {
rtlpcipriv->bt_coexist.eeprom_bt_coexist =
((hwinfo[RF_OPTION1] & 0xe0) >> 5);
- value = hwinfo[RF_OPTION4];
- rtlpcipriv->bt_coexist.eeprom_bt_type = ((value & 0xe) >> 1);
- rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (value & 0x1);
- rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation =
- ((value & 0x10) >> 4);
+ val = hwinfo[RF_OPTION4];
+ rtlpcipriv->bt_coexist.eeprom_bt_type = ((val & 0xe) >> 1);
+ rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (val & 0x1);
+ rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
rtlpcipriv->bt_coexist.eeprom_bt_radio_shared =
- ((value & 0x20) >> 5);
+ ((val & 0x20) >> 5);
} else {
rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0;
rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE;
rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
- rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation = 0;
+ rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = 0;
rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 88deae6..73262ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -82,6 +82,8 @@ bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
if (is92c)
rtl_write_byte(rtlpriv, 0x14, 0x71);
+ else
+ rtl_write_byte(rtlpriv, 0x04CA, 0x0A);
return rtstatus;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index 54c7614..a9c406f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -97,15 +97,12 @@ void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
}
if (rtlefuse->eeprom_regulatory == 0) {
- tmpval =
- (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
- (rtlphy->mcs_txpwrlevel_origoffset[0][7] <<
- 8);
+ tmpval = (rtlphy->mcs_offset[0][6]) +
+ (rtlphy->mcs_offset[0][7] << 8);
tx_agc[RF90_PATH_A] += tmpval;
- tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
- (rtlphy->mcs_txpwrlevel_origoffset[0][15] <<
- 24);
+ tmpval = (rtlphy->mcs_offset[0][14]) +
+ (rtlphy->mcs_offset[0][15] << 24);
tx_agc[RF90_PATH_B] += tmpval;
}
}
@@ -209,8 +206,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
case 0:
chnlgroup = 0;
- writeVal =
- rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
+ writeVal = rtlphy->mcs_offset[chnlgroup][index +
(rf ? 8 : 0)]
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
@@ -240,8 +236,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
chnlgroup++;
}
- writeVal =
- rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ writeVal = rtlphy->mcs_offset[chnlgroup]
[index + (rf ? 8 : 0)] + ((index < 2) ?
powerBase0[rf] :
powerBase1[rf]);
@@ -276,8 +271,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
1]);
}
for (i = 0; i < 4; i++) {
- pwr_diff_limit[i] =
- (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+ pwr_diff_limit[i] = (u8) ((rtlphy->mcs_offset
[chnlgroup][index +
(rf ? 8 : 0)] & (0x7f << (i * 8))) >>
(i * 8));
@@ -317,8 +311,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
break;
default:
chnlgroup = 0;
- writeVal =
- rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ writeVal = rtlphy->mcs_offset[chnlgroup]
[index + (rf ? 8 : 0)]
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
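
The rf.c hunks above index the renamed mcs_offset table and, in the HT40 path, pull four 7-bit power-diff limits out of one 32-bit word with (val & (0x7f << (i * 8))) >> (i * 8). A minimal standalone sketch of that extraction, run over an arbitrary example word rather than real power-group data:

/*
 * Sketch of the per-byte extraction used for pwr_diff_limit[] above:
 * each byte of a 32-bit mcs_offset word is masked down to 7 bits.
 * The word value is arbitrary demonstration data.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mcs_offset_word = 0x8a0c0604;	/* arbitrary example */
	uint8_t pwr_diff_limit[4];
	int i;

	for (i = 0; i < 4; i++) {
		pwr_diff_limit[i] =
			(uint8_t)((mcs_offset_word & (0x7f << (i * 8))) >> (i * 8));
		printf("pwr_diff_limit[%d] = 0x%02x\n", i, pwr_diff_limit[i]);
	}
	return 0;
}
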
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index ea2e1bd..60451ee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -162,12 +162,10 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
/* request fw */
if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
- !IS_92C_SERIAL(rtlhal->version)) {
+ !IS_92C_SERIAL(rtlhal->version))
rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
- } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+ else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
- pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n");
- }
rtlpriv->max_fw_size = 0x4000;
pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 390d6d4..d7e1f0a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -127,11 +127,11 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct phy_sts_cck_8192s_t *cck_buf;
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
s8 rx_pwr_all = 0, rx_pwr[4];
u8 evm, pwdb_all, rf_rx_num = 0;
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
- bool in_powersavemode = false;
bool is_cck_rate;
is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
@@ -140,14 +140,14 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
pstats->is_cck = is_cck_rate;
pstats->packet_beacon = packet_beacon;
pstats->is_cck = is_cck_rate;
- pstats->rx_mimo_signalquality[0] = -1;
- pstats->rx_mimo_signalquality[1] = -1;
+ pstats->rx_mimo_sig_qual[0] = -1;
+ pstats->rx_mimo_sig_qual[1] = -1;
if (is_cck_rate) {
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
- if (!in_powersavemode)
+ if (ppsc->rfpwr_state == ERFON)
cck_highpwr = (u8) rtl_get_bbreg(hw,
RFPGA0_XA_HSSIPARAMETER2,
BIT(9));
@@ -211,8 +211,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
}
pstats->signalquality = sq;
- pstats->rx_mimo_signalquality[0] = sq;
- pstats->rx_mimo_signalquality[1] = -1;
+ pstats->rx_mimo_sig_qual[0] = sq;
+ pstats->rx_mimo_sig_qual[1] = -1;
}
} else {
rtlpriv->dm.rfpath_rxenable[0] =
@@ -251,8 +251,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
if (i == 0)
pstats->signalquality =
(u8) (evm & 0xff);
- pstats->rx_mimo_signalquality[i] =
- (u8) (evm & 0xff);
+ pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
}
}
}
@@ -362,36 +361,31 @@ static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
return;
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
}
if (pstats->packet_toself || pstats->packet_beacon) {
- if (undecorated_smoothed_pwdb < 0)
- undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
+ if (undec_sm_pwdb < 0)
+ undec_sm_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
- undecorated_smoothed_pwdb =
- (((undecorated_smoothed_pwdb) *
+ if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
+ undec_sm_pwdb = (((undec_sm_pwdb) *
(RX_SMOOTH_FACTOR - 1)) +
(pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
- + 1;
+ undec_sm_pwdb += 1;
} else {
- undecorated_smoothed_pwdb =
- (((undecorated_smoothed_pwdb) *
+ undec_sm_pwdb = (((undec_sm_pwdb) *
(RX_SMOOTH_FACTOR - 1)) +
(pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
}
- rtlpriv->dm.undecorated_smoothed_pwdb =
- undecorated_smoothed_pwdb;
+ rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
_rtl92ce_update_rxsignalstatistics(hw, pstats);
}
}
@@ -438,15 +432,14 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
for (n_spatialstream = 0; n_spatialstream < 2;
n_spatialstream++) {
if (pstats->
- rx_mimo_signalquality[n_spatialstream] !=
- -1) {
+ rx_mimo_sig_qual[n_spatialstream] != -1) {
if (rtlpriv->stats.
rx_evm_percentage[n_spatialstream]
== 0) {
rtlpriv->stats.
rx_evm_percentage
[n_spatialstream] =
- pstats->rx_mimo_signalquality
+ pstats->rx_mimo_sig_qual
[n_spatialstream];
}
@@ -456,8 +449,7 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
stats.rx_evm_percentage
[n_spatialstream] *
(RX_SMOOTH_FACTOR - 1)) +
- (pstats->
- rx_mimo_signalquality
+ (pstats->rx_mimo_sig_qual
[n_spatialstream] * 1)) /
(RX_SMOOTH_FACTOR);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
index 6fd39ea..16a0b9e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -39,7 +39,7 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
if (!rtlpriv->dm.dynamic_txpower_enable)
return;
@@ -50,7 +50,7 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if ((mac->link_state < MAC80211_LINKED) &&
- (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
@@ -62,41 +62,35 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
- if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+ if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
- } else if ((undecorated_smoothed_pwdb <
- (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
- (undecorated_smoothed_pwdb >=
- TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+ } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+ (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
- } else if (undecorated_smoothed_pwdb <
- (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+ } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_NORMAL\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 4bbb711..b1ccff4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -152,9 +152,9 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
else
tempval = EEPROM_DEFAULT_HT40_2SDIFF;
- rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
(tempval & 0xf);
- rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
((tempval & 0xf0) >> 4);
}
for (rf_path = 0; rf_path < 2; rf_path++)
@@ -177,7 +177,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
"RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
rf_path, i,
rtlefuse->
- eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]);
+ eprom_chnl_txpwr_ht40_2sdf[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++) {
for (i = 0; i < 14; i++) {
index = _rtl92c_get_chnl_group((u8) i);
@@ -189,13 +189,13 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
if ((rtlefuse->
eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
rtlefuse->
- eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+ eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
> 0) {
rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
rtlefuse->
eeprom_chnlarea_txpwr_ht40_1s[rf_path]
[index] - rtlefuse->
- eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+ eprom_chnl_txpwr_ht40_2sdf[rf_path]
[index];
} else {
rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
@@ -2169,10 +2169,8 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1], rate_mask[2], rate_mask[3],
- rate_mask[4]);
+ "Rate_index:%x, ratr_val:%x, %5phC\n",
+ ratr_index, ratr_bitmap, rate_mask);
rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 7e91c76..32ff959 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -46,7 +46,7 @@
#define LINK_Q ui_link_quality
#define RX_EVM rx_evm_percentage
-#define RX_SIGQ rx_mimo_signalquality
+#define RX_SIGQ rx_mimo_sig_qual
void rtl92c_read_chip_version(struct ieee80211_hw *hw)
@@ -982,32 +982,27 @@ static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undecorated_smoothed_pwdb = 0;
+ long undec_sm_pwdb = 0;
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
return;
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
}
if (pstats->packet_toself || pstats->packet_beacon) {
- if (undecorated_smoothed_pwdb < 0)
- undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
- undecorated_smoothed_pwdb =
- (((undecorated_smoothed_pwdb) *
+ if (undec_sm_pwdb < 0)
+ undec_sm_pwdb = pstats->rx_pwdb_all;
+ if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
+ undec_sm_pwdb = (((undec_sm_pwdb) *
(RX_SMOOTH_FACTOR - 1)) +
(pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
- + 1;
+ undec_sm_pwdb += 1;
} else {
- undecorated_smoothed_pwdb =
- (((undecorated_smoothed_pwdb) *
+ undec_sm_pwdb = (((undec_sm_pwdb) *
(RX_SMOOTH_FACTOR - 1)) +
(pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
}
- rtlpriv->dm.undecorated_smoothed_pwdb =
- undecorated_smoothed_pwdb;
+ rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
_rtl92c_update_rxsignalstatistics(hw, pstats);
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 506b9a0..953f1a0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -115,15 +115,11 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
(ppowerlevel[idx1] << 24);
}
if (rtlefuse->eeprom_regulatory == 0) {
- tmpval = (rtlphy->mcs_txpwrlevel_origoffset
- [0][6]) +
- (rtlphy->mcs_txpwrlevel_origoffset
- [0][7] << 8);
+ tmpval = (rtlphy->mcs_offset[0][6]) +
+ (rtlphy->mcs_offset[0][7] << 8);
tx_agc[RF90_PATH_A] += tmpval;
- tmpval = (rtlphy->mcs_txpwrlevel_origoffset
- [0][14]) +
- (rtlphy->mcs_txpwrlevel_origoffset
- [0][15] << 24);
+ tmpval = (rtlphy->mcs_offset[0][14]) +
+ (rtlphy->mcs_offset[0][15] << 24);
tx_agc[RF90_PATH_B] += tmpval;
}
}
@@ -215,7 +211,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
switch (rtlefuse->eeprom_regulatory) {
case 0:
chnlgroup = 0;
- writeVal = rtlphy->mcs_txpwrlevel_origoffset
+ writeVal = rtlphy->mcs_offset
[chnlgroup][index + (rf ? 8 : 0)]
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
@@ -238,8 +234,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
else
chnlgroup += 4;
}
- writeVal = rtlphy->mcs_txpwrlevel_origoffset
- [chnlgroup][index +
+ writeVal = rtlphy->mcs_offset[chnlgroup][index +
(rf ? 8 : 0)] +
((index < 2) ? powerBase0[rf] :
powerBase1[rf]);
@@ -271,8 +266,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
[channel - 1]);
}
for (i = 0; i < 4; i++) {
- pwr_diff_limit[i] =
- (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+ pwr_diff_limit[i] = (u8) ((rtlphy->mcs_offset
[chnlgroup][index + (rf ? 8 : 0)]
& (0x7f << (i * 8))) >> (i * 8));
if (rtlphy->current_chan_bw ==
@@ -306,7 +300,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
break;
default:
chnlgroup = 0;
- writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ writeVal = rtlphy->mcs_offset[chnlgroup]
[index + (rf ? 8 : 0)] + ((index < 2) ?
powerBase0[rf] : powerBase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index ed868c3..fd8df23 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -35,7 +35,7 @@
#include "dm.h"
#include "fw.h"
-#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb
+#define UNDEC_SM_PWDB entry_min_undec_sm_pwdb
static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
0x7f8001fe, /* 0, +6.0dB */
@@ -164,18 +164,18 @@ static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
de_digtable->cur_igvalue = 0x20;
de_digtable->pre_igvalue = 0x0;
- de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
- de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
- de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+ de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+ de_digtable->presta_cstate = DIG_STA_DISCONNECT;
+ de_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER;
de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER;
- de_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
- de_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
- de_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+ de_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+ de_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+ de_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
de_digtable->large_fa_hit = 0;
@@ -273,35 +273,34 @@ static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
/* Determine the minimum RSSI */
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
- de_digtable->min_undecorated_pwdb_for_dm = 0;
+ de_digtable->min_undec_pwdb_for_dm = 0;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
- de_digtable->min_undecorated_pwdb_for_dm =
+ de_digtable->min_undec_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
rtlpriv->dm.UNDEC_SM_PWDB);
} else {
- de_digtable->min_undecorated_pwdb_for_dm =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ de_digtable->min_undec_pwdb_for_dm =
+ rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"STA Default Port PWDB = 0x%x\n",
- de_digtable->min_undecorated_pwdb_for_dm);
+ de_digtable->min_undec_pwdb_for_dm);
}
} else {
- de_digtable->min_undecorated_pwdb_for_dm =
- rtlpriv->dm.UNDEC_SM_PWDB;
+ de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"AP Ext Port or disconnect PWDB = 0x%x\n",
- de_digtable->min_undecorated_pwdb_for_dm);
+ de_digtable->min_undec_pwdb_for_dm);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
- de_digtable->min_undecorated_pwdb_for_dm);
+ de_digtable->min_undec_pwdb_for_dm);
}
static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -310,16 +309,16 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
struct dig_t *de_digtable = &rtlpriv->dm_digtable;
unsigned long flag = 0;
- if (de_digtable->cursta_connectstate == DIG_STA_CONNECT) {
+ if (de_digtable->cursta_cstate == DIG_STA_CONNECT) {
if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
- if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
+ if (de_digtable->min_undec_pwdb_for_dm <= 25)
de_digtable->cur_cck_pd_state =
CCK_PD_STAGE_LOWRSSI;
else
de_digtable->cur_cck_pd_state =
CCK_PD_STAGE_HIGHRSSI;
} else {
- if (de_digtable->min_undecorated_pwdb_for_dm <= 20)
+ if (de_digtable->min_undec_pwdb_for_dm <= 20)
de_digtable->cur_cck_pd_state =
CCK_PD_STAGE_LOWRSSI;
else
@@ -342,7 +341,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
- de_digtable->cursta_connectstate == DIG_STA_CONNECT ?
+ de_digtable->cursta_cstate == DIG_STA_CONNECT ?
"DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
@@ -358,9 +357,9 @@ void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
struct dig_t *de_digtable = &rtlpriv->dm_digtable;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
de_digtable->cur_igvalue, de_digtable->pre_igvalue,
- de_digtable->backoff_val);
+ de_digtable->back_val);
if (de_digtable->dig_enable_flag == false) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
de_digtable->pre_igvalue = 0x17;
@@ -382,13 +381,13 @@ static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
(rtlpriv->mac80211.vendor == PEER_CISCO)) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
- if (de_digtable->last_min_undecorated_pwdb_for_dm >= 50
- && de_digtable->min_undecorated_pwdb_for_dm < 50) {
+ if (de_digtable->last_min_undec_pwdb_for_dm >= 50
+ && de_digtable->min_undec_pwdb_for_dm < 50) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Early Mode Off\n");
- } else if (de_digtable->last_min_undecorated_pwdb_for_dm <= 55 &&
- de_digtable->min_undecorated_pwdb_for_dm > 55) {
+ } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
+ de_digtable->min_undec_pwdb_for_dm > 55) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Early Mode On\n");
@@ -409,8 +408,8 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
if (rtlpriv->rtlhal.earlymode_enable) {
rtl92d_early_mode_enabled(rtlpriv);
- de_digtable->last_min_undecorated_pwdb_for_dm =
- de_digtable->min_undecorated_pwdb_for_dm;
+ de_digtable->last_min_undec_pwdb_for_dm =
+ de_digtable->min_undec_pwdb_for_dm;
}
if (!rtlpriv->dm.dm_initialgain_enable)
return;
@@ -428,9 +427,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
- de_digtable->cursta_connectstate = DIG_STA_CONNECT;
+ de_digtable->cursta_cstate = DIG_STA_CONNECT;
else
- de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
+ de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
/* adjust initial gain according to false alarm counter */
if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
@@ -522,7 +521,7 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
if ((!rtlpriv->dm.dynamic_txpower_enable)
|| rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
@@ -539,62 +538,62 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- undecorated_smoothed_pwdb =
+ undec_sm_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"IBSS Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ undec_sm_pwdb =
+ rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
} else {
- undecorated_smoothed_pwdb =
+ undec_sm_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
- if (undecorated_smoothed_pwdb >= 0x33) {
+ if (undec_sm_pwdb >= 0x33) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
- } else if ((undecorated_smoothed_pwdb < 0x33)
- && (undecorated_smoothed_pwdb >= 0x2b)) {
+ } else if ((undec_sm_pwdb < 0x33)
+ && (undec_sm_pwdb >= 0x2b)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
- } else if (undecorated_smoothed_pwdb < 0x2b) {
+ } else if (undec_sm_pwdb < 0x2b) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Normal\n");
}
} else {
- if (undecorated_smoothed_pwdb >=
+ if (undec_sm_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else
- if ((undecorated_smoothed_pwdb <
+ if ((undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
- && (undecorated_smoothed_pwdb >=
+ && (undec_sm_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
- } else if (undecorated_smoothed_pwdb <
+ } else if (undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
@@ -620,7 +619,7 @@ static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
return;
/* Indicate Rx signal strength to FW. */
if (rtlpriv->dm.useramask) {
- u32 temp = rtlpriv->dm.undecorated_smoothed_pwdb;
+ u32 temp = rtlpriv->dm.undec_sm_pwdb;
temp <<= 16;
temp |= 0x100;
@@ -629,7 +628,7 @@ static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *) (&temp));
} else {
rtl_write_byte(rtlpriv, 0x4fe,
- (u8) rtlpriv->dm.undecorated_smoothed_pwdb);
+ (u8) rtlpriv->dm.undec_sm_pwdb);
}
}
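
The rtl8192de/dm.c hunks above rename undecorated_smoothed_pwdb to undec_sm_pwdb without changing the classification logic: the 5 GHz branch maps the smoothed PWDB onto three power levels using the 0x33 and 0x2b thresholds visible in the diff. A minimal standalone sketch of that mapping, with illustrative enum names rather than the driver's TXHIGHPWRLEVEL_* constants:

/*
 * Sketch of the 5 GHz dynamic-TX-power classification from the
 * rtl92d_dm_dynamic_txpower() hunk above.  Only the thresholds shown in
 * the diff are reused; everything else is demonstration scaffolding.
 */
#include <stdio.h>

enum txpwr_lvl { TXPWR_NORMAL, TXPWR_LEVEL1, TXPWR_LEVEL2 };

static enum txpwr_lvl pick_5g_txpower_level(long undec_sm_pwdb)
{
	if (undec_sm_pwdb >= 0x33)
		return TXPWR_LEVEL2;	/* strong signal: TxPwr index 0x0  */
	if (undec_sm_pwdb >= 0x2b)
		return TXPWR_LEVEL1;	/* medium signal: TxPwr index 0x10 */
	return TXPWR_NORMAL;		/* weak signal: normal power       */
}

int main(void)
{
	long pwdb;

	for (pwdb = 0x28; pwdb <= 0x36; pwdb += 2)
		printf("pwdb=0x%lx -> level=%d\n",
		       pwdb, pick_5g_txpower_level(pwdb));
	return 0;
}
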
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index db00860..33041bd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -298,13 +298,13 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
BIT(8));
if (rfpi_enable)
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
BLSSIREADBACKDATA);
else
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
- rfpath, pphyreg->rflssi_readback, retvalue);
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
@@ -478,14 +478,10 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
/* RF switch Control */
/* TR/Ant switch control */
- rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
- RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
- RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
- RFPGA0_XCD_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
- RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
/* AGC control 1 */
rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
@@ -500,14 +496,10 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
/* RX AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
- ROFDM0_XARXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
- ROFDM0_XBRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
- ROFDM0_XCRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
- ROFDM0_XDRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
/*RX AFE control 1 */
rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
@@ -516,14 +508,10 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
/* Tx AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
- ROFDM0_XATxIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
- ROFDM0_XBTxIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
- ROFDM0_XCTxIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
- ROFDM0_XDTxIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATxIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTxIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTxIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTxIQIMBALANCE;
/* Tx AFE control 2 */
rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATxAFE;
@@ -532,20 +520,14 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTxAFE;
/* Tranceiver LSSI Readback SI mode */
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
- RFPGA0_XA_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
- RFPGA0_XB_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
- RFPGA0_XC_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
- RFPGA0_XD_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
/* Tranceiver LSSI Readback PI mode */
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
- TRANSCEIVERA_HSPI_READBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
- TRANSCEIVERB_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
}
static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -702,12 +684,11 @@ static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
else
return;
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][index] = data;
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%ulx\n",
rtlphy->pwrgroup_cnt, index,
- rtlphy->mcs_txpwrlevel_origoffset
- [rtlphy->pwrgroup_cnt][index]);
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
if (index == 13)
rtlphy->pwrgroup_cnt++;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
index 3066a7fb..20144e0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
@@ -106,11 +106,11 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
(ppowerlevel[idx1] << 24);
}
if (rtlefuse->eeprom_regulatory == 0) {
- tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
- (rtlphy->mcs_txpwrlevel_origoffset[0][7] << 8);
+ tmpval = (rtlphy->mcs_offset[0][6]) +
+ (rtlphy->mcs_offset[0][7] << 8);
tx_agc[RF90_PATH_A] += tmpval;
- tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
- (rtlphy->mcs_txpwrlevel_origoffset[0][15] << 24);
+ tmpval = (rtlphy->mcs_offset[0][14]) +
+ (rtlphy->mcs_offset[0][15] << 24);
tx_agc[RF90_PATH_B] += tmpval;
}
}
@@ -227,7 +227,7 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
switch (rtlefuse->eeprom_regulatory) {
case 0:
chnlgroup = 0;
- writeval = rtlphy->mcs_txpwrlevel_origoffset
+ writeval = rtlphy->mcs_offset
[chnlgroup][index +
(rf ? 8 : 0)] + ((index < 2) ?
powerbase0[rf] :
@@ -247,7 +247,7 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
chnlgroup++;
else
chnlgroup += 4;
- writeval = rtlphy->mcs_txpwrlevel_origoffset
+ writeval = rtlphy->mcs_offset
[chnlgroup][index +
(rf ? 8 : 0)] + ((index < 2) ?
powerbase0[rf] :
@@ -280,8 +280,7 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
[channel - 1]);
}
for (i = 0; i < 4; i++) {
- pwr_diff_limit[i] =
- (u8)((rtlphy->mcs_txpwrlevel_origoffset
+ pwr_diff_limit[i] = (u8)((rtlphy->mcs_offset
[chnlgroup][index + (rf ? 8 : 0)] &
(0x7f << (i * 8))) >> (i * 8));
if (rtlphy->current_chan_bw ==
@@ -316,8 +315,7 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
break;
default:
chnlgroup = 0;
- writeval = rtlphy->mcs_txpwrlevel_origoffset
- [chnlgroup][index +
+ writeval = rtlphy->mcs_offset[chnlgroup][index +
(rf ? 8 : 0)] + ((index < 2) ?
powerbase0[rf] : powerbase1[rf]);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 4686f34..35bb9da 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -132,8 +132,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
pstats->packet_toself = packet_toself;
pstats->packet_beacon = packet_beacon;
pstats->is_cck = is_cck_rate;
- pstats->rx_mimo_signalquality[0] = -1;
- pstats->rx_mimo_signalquality[1] = -1;
+ pstats->rx_mimo_sig_qual[0] = -1;
+ pstats->rx_mimo_sig_qual[1] = -1;
if (is_cck_rate) {
u8 report, cck_highpwr;
@@ -212,8 +212,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
sq = ((64 - sq) * 100) / 44;
}
pstats->signalquality = sq;
- pstats->rx_mimo_signalquality[0] = sq;
- pstats->rx_mimo_signalquality[1] = -1;
+ pstats->rx_mimo_sig_qual[0] = sq;
+ pstats->rx_mimo_sig_qual[1] = -1;
}
} else {
rtlpriv->dm.rfpath_rxenable[0] = true;
@@ -246,7 +246,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
if (i == 0)
pstats->signalquality =
(u8)(evm & 0xff);
- pstats->rx_mimo_signalquality[i] =
+ pstats->rx_mimo_sig_qual[i] =
(u8)(evm & 0xff);
}
}
@@ -345,33 +345,28 @@ static void _rtl92de_process_pwdb(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
if (mac->opmode == NL80211_IFTYPE_ADHOC ||
mac->opmode == NL80211_IFTYPE_AP)
return;
else
- undecorated_smoothed_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
if (pstats->packet_toself || pstats->packet_beacon) {
- if (undecorated_smoothed_pwdb < 0)
- undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
- undecorated_smoothed_pwdb =
- (((undecorated_smoothed_pwdb) *
+ if (undec_sm_pwdb < 0)
+ undec_sm_pwdb = pstats->rx_pwdb_all;
+ if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
+ undec_sm_pwdb = (((undec_sm_pwdb) *
(RX_SMOOTH_FACTOR - 1)) +
(pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undecorated_smoothed_pwdb =
- undecorated_smoothed_pwdb + 1;
+ undec_sm_pwdb = undec_sm_pwdb + 1;
} else {
- undecorated_smoothed_pwdb =
- (((undecorated_smoothed_pwdb) *
+ undec_sm_pwdb = (((undec_sm_pwdb) *
(RX_SMOOTH_FACTOR - 1)) +
(pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
}
- rtlpriv->dm.undecorated_smoothed_pwdb =
- undecorated_smoothed_pwdb;
+ rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
_rtl92de_update_rxsignalstatistics(hw, pstats);
}
}
@@ -383,15 +378,15 @@ static void rtl92d_loop_over_streams(struct ieee80211_hw *hw,
int stream;
for (stream = 0; stream < 2; stream++) {
- if (pstats->rx_mimo_signalquality[stream] != -1) {
+ if (pstats->rx_mimo_sig_qual[stream] != -1) {
if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
rtlpriv->stats.rx_evm_percentage[stream] =
- pstats->rx_mimo_signalquality[stream];
+ pstats->rx_mimo_sig_qual[stream];
}
rtlpriv->stats.rx_evm_percentage[stream] =
((rtlpriv->stats.rx_evm_percentage[stream]
* (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalquality[stream] * 1)) /
+ (pstats->rx_mimo_sig_qual[stream] * 1)) /
(RX_SMOOTH_FACTOR);
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 465f581..bf79a52 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -267,13 +267,12 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
break;
}
- if (rtlpriv->dm.undecorated_smoothed_pwdb >
- (long)high_rssi_thresh) {
+ if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssi_thresh) {
ra->ratr_state = DM_RATR_STA_HIGH;
- } else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ } else if (rtlpriv->dm.undec_sm_pwdb >
(long)middle_rssi_thresh) {
ra->ratr_state = DM_RATR_STA_LOW;
- } else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ } else if (rtlpriv->dm.undec_sm_pwdb >
(long)low_rssi_thresh) {
ra->ratr_state = DM_RATR_STA_LOW;
} else {
@@ -283,8 +282,7 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
if (ra->pre_ratr_state != ra->ratr_state) {
RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
"RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
- rtlpriv->dm.undecorated_smoothed_pwdb,
- ra->ratr_state,
+ rtlpriv->dm.undec_sm_pwdb, ra->ratr_state,
ra->pre_ratr_state, ra->ratr_state);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
@@ -316,7 +314,7 @@ static void _rtl92s_dm_switch_baseband_mrc(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MRC, (u8 *)(&current_mrc));
if (mac->link_state >= MAC80211_LINKED) {
- if (rtlpriv->dm.undecorated_smoothed_pwdb > tmpentry_maxpwdb) {
+ if (rtlpriv->dm.undec_sm_pwdb > tmpentry_maxpwdb) {
rssi_a = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_A];
rssi_b = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_B];
}
@@ -424,18 +422,18 @@ static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw)
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
if (falsealm_cnt->cnt_all > digtable->fa_highthresh) {
- if ((digtable->backoff_val - 6) <
+ if ((digtable->back_val - 6) <
digtable->backoffval_range_min)
- digtable->backoff_val = digtable->backoffval_range_min;
+ digtable->back_val = digtable->backoffval_range_min;
else
- digtable->backoff_val -= 6;
+ digtable->back_val -= 6;
} else if (falsealm_cnt->cnt_all < digtable->fa_lowthresh) {
- if ((digtable->backoff_val + 6) >
+ if ((digtable->back_val + 6) >
digtable->backoffval_range_max)
- digtable->backoff_val =
+ digtable->back_val =
digtable->backoffval_range_max;
else
- digtable->backoff_val += 6;
+ digtable->back_val += 6;
}
}
@@ -447,28 +445,28 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
static u8 initialized, force_write;
u8 initial_gain = 0;
- if ((digtable->pre_sta_connectstate == digtable->cur_sta_connectstate) ||
- (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) {
- if (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) {
+ if ((digtable->pre_sta_cstate == digtable->cur_sta_cstate) ||
+ (digtable->cur_sta_cstate == DIG_STA_BEFORE_CONNECT)) {
+ if (digtable->cur_sta_cstate == DIG_STA_BEFORE_CONNECT) {
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
if (digtable->backoff_enable_flag)
rtl92s_backoff_enable_flag(hw);
else
- digtable->backoff_val = DM_DIG_BACKOFF;
+ digtable->back_val = DM_DIG_BACKOFF;
- if ((digtable->rssi_val + 10 - digtable->backoff_val) >
+ if ((digtable->rssi_val + 10 - digtable->back_val) >
digtable->rx_gain_range_max)
digtable->cur_igvalue =
digtable->rx_gain_range_max;
- else if ((digtable->rssi_val + 10 - digtable->backoff_val)
+ else if ((digtable->rssi_val + 10 - digtable->back_val)
< digtable->rx_gain_range_min)
digtable->cur_igvalue =
digtable->rx_gain_range_min;
else
digtable->cur_igvalue = digtable->rssi_val + 10 -
- digtable->backoff_val;
+ digtable->back_val;
if (falsealm_cnt->cnt_all > 10000)
digtable->cur_igvalue =
@@ -490,7 +488,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
- digtable->backoff_val = DM_DIG_BACKOFF;
+ digtable->back_val = DM_DIG_BACKOFF;
digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0];
digtable->pre_igvalue = 0;
return;
@@ -528,14 +526,14 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED ||
rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
- digtable->cur_sta_connectstate = DIG_STA_CONNECT;
+ digtable->cur_sta_cstate = DIG_STA_CONNECT;
else
- digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
+ digtable->cur_sta_cstate = DIG_STA_DISCONNECT;
- digtable->rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb;
+ digtable->rssi_val = rtlpriv->dm.undec_sm_pwdb;
/* Change dig mode to rssi */
- if (digtable->cur_sta_connectstate != DIG_STA_DISCONNECT) {
+ if (digtable->cur_sta_cstate != DIG_STA_DISCONNECT) {
if (digtable->dig_twoport_algorithm ==
DIG_TWO_PORT_ALGO_FALSE_ALARM) {
digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
@@ -546,7 +544,7 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
_rtl92s_dm_false_alarm_counter_statistics(hw);
_rtl92s_dm_initial_gain_sta_beforeconnect(hw);
- digtable->pre_sta_connectstate = digtable->cur_sta_connectstate;
+ digtable->pre_sta_cstate = digtable->cur_sta_cstate;
}
static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
@@ -573,7 +571,7 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
long txpwr_threshold_lv1, txpwr_threshold_lv2;
/* 2T2R TP issue */
@@ -587,7 +585,7 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if ((mac->link_state < MAC80211_LINKED) &&
- (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
@@ -599,25 +597,22 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
} else {
- undecorated_smoothed_pwdb =
- rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
- undecorated_smoothed_pwdb);
+ undec_sm_pwdb);
}
txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2;
@@ -625,12 +620,12 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (rtl_get_bbreg(hw, 0xc90, MASKBYTE0) == 1)
rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
- else if (undecorated_smoothed_pwdb >= txpwr_threshold_lv2)
+ else if (undec_sm_pwdb >= txpwr_threshold_lv2)
rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL2;
- else if ((undecorated_smoothed_pwdb < (txpwr_threshold_lv2 - 3)) &&
- (undecorated_smoothed_pwdb >= txpwr_threshold_lv1))
+ else if ((undec_sm_pwdb < (txpwr_threshold_lv2 - 3)) &&
+ (undec_sm_pwdb >= txpwr_threshold_lv1))
rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL1;
- else if (undecorated_smoothed_pwdb < (txpwr_threshold_lv1 - 3))
+ else if (undec_sm_pwdb < (txpwr_threshold_lv1 - 3))
rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl))
@@ -665,10 +660,10 @@ static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
digtable->dig_state = DM_STA_DIG_MAX;
digtable->dig_highpwrstate = DM_STA_DIG_MAX;
- digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
- digtable->pre_sta_connectstate = DIG_STA_DISCONNECT;
- digtable->cur_ap_connectstate = DIG_AP_DISCONNECT;
- digtable->pre_ap_connectstate = DIG_AP_DISCONNECT;
+ digtable->cur_sta_cstate = DIG_STA_DISCONNECT;
+ digtable->pre_sta_cstate = DIG_STA_DISCONNECT;
+ digtable->cur_ap_cstate = DIG_AP_DISCONNECT;
+ digtable->pre_ap_cstate = DIG_AP_DISCONNECT;
digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
@@ -681,7 +676,7 @@ static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
/* for dig debug rssi value */
digtable->rssi_val = 50;
- digtable->backoff_val = DM_DIG_BACKOFF;
+ digtable->back_val = DM_DIG_BACKOFF;
digtable->rx_gain_range_max = DM_DIG_MAX;
digtable->rx_gain_range_min = DM_DIG_MIN;
@@ -709,7 +704,7 @@ void rtl92s_dm_init(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
- rtlpriv->dm.undecorated_smoothed_pwdb = -1;
+ rtlpriv->dm.undec_sm_pwdb = -1;
_rtl92s_dm_init_dynamic_txpower(hw);
rtl92s_dm_init_edca_turbo(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 4542e69..1d72779 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -1697,7 +1697,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
hwinfo[EEPROM_TXPOWERBASE + 6 + rf_path * 3 + i];
/* Read OFDM RF A & B Tx power for 2T */
- rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path][i]
= hwinfo[EEPROM_TXPOWERBASE + 12 +
rf_path * 3 + i];
}
@@ -1722,7 +1722,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
"RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
rf_path, i,
- rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf
[rf_path][i]);
for (rf_path = 0; rf_path < 2; rf_path++) {
@@ -1748,7 +1748,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
[rf_path][index];
rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
- rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf
[rf_path][index];
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index b917a2a..6740497 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -139,17 +139,17 @@ static u32 _rtl92s_phy_rf_serial_read(struct ieee80211_hw *hw,
BIT(8));
if (rfpi_enable)
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
BLSSI_READBACK_DATA);
else
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSI_READBACK_DATA);
- retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSI_READBACK_DATA);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rflssi_readback, retvalue);
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
@@ -696,7 +696,7 @@ static void _rtl92s_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
else
return;
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][index] = data;
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
if (index == 5)
rtlphy->pwrgroup_cnt++;
}
@@ -765,14 +765,10 @@ static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_D].rfhssi_para2 = RFPGA0_XD_HSSIPARAMETER2;
/* RF switch Control */
- rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
- RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
- RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
- RFPGA0_XCD_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
- RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
/* AGC control 1 */
rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
@@ -787,14 +783,10 @@ static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
/* RX AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
- ROFDM0_XARXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
- ROFDM0_XBRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
- ROFDM0_XCRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
- ROFDM0_XDRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
/* RX AFE control 1 */
rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
@@ -803,14 +795,10 @@ static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
/* Tx AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
- ROFDM0_XATXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
- ROFDM0_XBTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
- ROFDM0_XCTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
- ROFDM0_XDTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
/* Tx AFE control 2 */
rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
@@ -819,20 +807,14 @@ static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
/* Tranceiver LSSI Readback */
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
- RFPGA0_XA_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
- RFPGA0_XB_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
- RFPGA0_XC_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
- RFPGA0_XD_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
/* Tranceiver LSSI Readback PI mode */
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
- TRANSCEIVERA_HSPI_READBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
- TRANSCEIVERB_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 08c2f56..5061f1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -192,8 +192,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
* defined by Realtek for large power */
chnlgroup = 0;
- writeval = rtlphy->mcs_txpwrlevel_origoffset
- [chnlgroup][index] +
+ writeval = rtlphy->mcs_offset[chnlgroup][index] +
((index < 2) ? pwrbase0 : pwrbase1);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
@@ -223,8 +222,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
chnlgroup++;
}
- writeval = rtlphy->mcs_txpwrlevel_origoffset
- [chnlgroup][index]
+ writeval = rtlphy->mcs_offset[chnlgroup][index]
+ ((index < 2) ?
pwrbase0 : pwrbase1);
@@ -257,8 +255,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
}
for (i = 0; i < 4; i++) {
- pwrdiff_limit[i] =
- (u8)((rtlphy->mcs_txpwrlevel_origoffset
+ pwrdiff_limit[i] = (u8)((rtlphy->mcs_offset
[chnlgroup][index] & (0x7f << (i * 8)))
>> (i * 8));
@@ -296,7 +293,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
break;
default:
chnlgroup = 0;
- writeval = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index] +
+ writeval = rtlphy->mcs_offset[chnlgroup][index] +
((index < 2) ? pwrbase0 : pwrbase1);
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"RTK better performance, writeval = 0x%x\n", writeval);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index e3cf4c0..1ad51e7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -129,8 +129,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
pstats->packet_matchbssid = packet_match_bssid;
pstats->packet_toself = packet_toself;
pstats->packet_beacon = packet_beacon;
- pstats->rx_mimo_signalquality[0] = -1;
- pstats->rx_mimo_signalquality[1] = -1;
+ pstats->rx_mimo_sig_qual[0] = -1;
+ pstats->rx_mimo_sig_qual[1] = -1;
if (is_cck) {
u8 report, cck_highpwr;
@@ -216,8 +216,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
}
pstats->signalquality = sq;
- pstats->rx_mimo_signalquality[0] = sq;
- pstats->rx_mimo_signalquality[1] = -1;
+ pstats->rx_mimo_sig_qual[0] = sq;
+ pstats->rx_mimo_sig_qual[1] = -1;
}
} else {
rtlpriv->dm.rfpath_rxenable[0] =
@@ -256,8 +256,7 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
if (i == 0)
pstats->signalquality = (u8)(evm &
0xff);
- pstats->rx_mimo_signalquality[i] =
- (u8) (evm & 0xff);
+ pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
}
}
}
@@ -366,7 +365,7 @@ static void _rtl92se_process_pwdb(struct ieee80211_hw *hw,
return;
} else {
undec_sm_pwdb =
- rtlpriv->dm.undecorated_smoothed_pwdb;
+ rtlpriv->dm.undec_sm_pwdb;
}
if (pstats->packet_toself || pstats->packet_beacon) {
@@ -386,7 +385,7 @@ static void _rtl92se_process_pwdb(struct ieee80211_hw *hw,
(RX_SMOOTH_FACTOR);
}
- rtlpriv->dm.undecorated_smoothed_pwdb = undec_sm_pwdb;
+ rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
_rtl92se_update_rxsignalstatistics(hw, pstats);
}
}
@@ -398,16 +397,16 @@ static void rtl_92s_process_streams(struct ieee80211_hw *hw,
u32 stream;
for (stream = 0; stream < 2; stream++) {
- if (pstats->rx_mimo_signalquality[stream] != -1) {
+ if (pstats->rx_mimo_sig_qual[stream] != -1) {
if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
rtlpriv->stats.rx_evm_percentage[stream] =
- pstats->rx_mimo_signalquality[stream];
+ pstats->rx_mimo_sig_qual[stream];
}
rtlpriv->stats.rx_evm_percentage[stream] =
((rtlpriv->stats.rx_evm_percentage[stream] *
(RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalquality[stream] *
+ (pstats->rx_mimo_sig_qual[stream] *
1)) / (RX_SMOOTH_FACTOR);
}
}
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index f1b6bc6..6794b68 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -198,15 +198,15 @@ struct bb_reg_def {
u32 rftxgain_stage;
u32 rfhssi_para1;
u32 rfhssi_para2;
- u32 rfswitch_control;
+ u32 rfsw_ctrl;
u32 rfagc_control1;
u32 rfagc_control2;
- u32 rfrxiq_imbalance;
+ u32 rfrxiq_imbal;
u32 rfrx_afe;
- u32 rftxiq_imbalance;
+ u32 rftxiq_imbal;
u32 rftx_afe;
- u32 rflssi_readback;
- u32 rflssi_readbackpi;
+ u32 rf_rb; /* rflssi_readback */
+ u32 rf_rbpi; /* rflssi_readbackpi */
};
enum io_type {
@@ -885,7 +885,7 @@ struct rtl_phy {
u8 pwrgroup_cnt;
u8 cck_high_power;
/* MAX_PG_GROUP groups of pwr diff by rates */
- u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
+ u32 mcs_offset[MAX_PG_GROUP][16];
u8 default_initialgain[4];
/* the current Tx power level */
@@ -933,7 +933,7 @@ struct rtl_tid_data {
};
struct rssi_sta {
- long undecorated_smoothed_pwdb;
+ long undec_sm_pwdb;
};
struct rtl_sta_info {
@@ -1131,9 +1131,9 @@ struct rtl_security {
struct rtl_dm {
/*PHY status for Dynamic Management */
- long entry_min_undecoratedsmoothed_pwdb;
- long undecorated_smoothed_pwdb; /*out dm */
- long entry_max_undecoratedsmoothed_pwdb;
+ long entry_min_undec_sm_pwdb;
+ long undec_sm_pwdb; /*out dm */
+ long entry_max_undec_sm_pwdb;
bool dm_initialgain_enable;
bool dynamic_txpower_enable;
bool current_turbo_edca;
@@ -1209,7 +1209,7 @@ struct rtl_efuse {
u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
- u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+ u8 eprom_chnl_txpwr_ht40_2sdf[2][CHANNEL_GROUP_MAX];
u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
@@ -1351,7 +1351,7 @@ struct rtl_stats {
bool rx_is40Mhzpacket;
u32 rx_pwdb_all;
u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
- s8 rx_mimo_signalquality[2];
+ s8 rx_mimo_sig_qual[2];
bool packet_matchbssid;
bool is_cck;
bool is_ht;
@@ -1503,6 +1503,9 @@ struct rtl_hal_ops {
void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
+ void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
+ bool mstate);
+ void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
};
struct rtl_intf_ops {
@@ -1679,7 +1682,7 @@ struct dig_t {
u32 rssi_highthresh;
u32 fa_lowthresh;
u32 fa_highthresh;
- long last_min_undecorated_pwdb_for_dm;
+ long last_min_undec_pwdb_for_dm;
long rssi_highpower_lowthresh;
long rssi_highpower_highthresh;
u32 recover_cnt;
@@ -1692,15 +1695,15 @@ struct dig_t {
u8 dig_twoport_algorithm;
u8 dig_dbgmode;
u8 dig_slgorithm_switch;
- u8 cursta_connectstate;
- u8 presta_connectstate;
- u8 curmultista_connectstate;
- char backoff_val;
- char backoff_val_range_max;
- char backoff_val_range_min;
+ u8 cursta_cstate;
+ u8 presta_cstate;
+ u8 curmultista_cstate;
+ char back_val;
+ char back_range_max;
+ char back_range_min;
u8 rx_gain_range_max;
u8 rx_gain_range_min;
- u8 min_undecorated_pwdb_for_dm;
+ u8 min_undec_pwdb_for_dm;
u8 rssi_val_min;
u8 pre_cck_pd_state;
u8 cur_cck_pd_state;
@@ -1712,10 +1715,10 @@ struct dig_t {
u8 forbidden_igi;
u8 dig_state;
u8 dig_highpwrstate;
- u8 cur_sta_connectstate;
- u8 pre_sta_connectstate;
- u8 cur_ap_connectstate;
- u8 pre_ap_connectstate;
+ u8 cur_sta_cstate;
+ u8 pre_sta_cstate;
+ u8 cur_ap_cstate;
+ u8 pre_ap_cstate;
u8 cur_pd_thstate;
u8 pre_pd_thstate;
u8 cur_cs_ratiostate;
@@ -1846,7 +1849,7 @@ struct bt_coexist_info {
u8 eeprom_bt_coexist;
u8 eeprom_bt_type;
u8 eeprom_bt_ant_num;
- u8 eeprom_bt_ant_isolation;
+ u8 eeprom_bt_ant_isol;
u8 eeprom_bt_radio_shared;
u8 bt_coexistence;
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index bf05831..36c3590 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -2,7 +2,7 @@
# Makefile for nfc devices
#
-obj-$(CONFIG_PN544_HCI_NFC) += pn544_hci.o
+obj-$(CONFIG_PN544_HCI_NFC) += pn544/
obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 97c440a..18e279d 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -84,6 +84,10 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_LISTEN_TIME 2
/* frame definitions */
+#define PN533_NORMAL_FRAME_MAX_LEN 262 /* 6 (PREAMBLE, SOF, LEN, LCS, TFI)
+ 254 (DATA)
+ 2 (DCS, postamble) */
+
#define PN533_FRAME_TAIL_SIZE 2
#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
PN533_FRAME_TAIL_SIZE)
@@ -1165,8 +1169,7 @@ static void pn533_poll_create_mod_list(struct pn533 *dev,
pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
}
-static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
- u8 *params, int params_len)
+static int pn533_start_poll_complete(struct pn533 *dev, u8 *params, int params_len)
{
struct pn533_poll_response *resp;
int rc;
@@ -1304,8 +1307,7 @@ static void pn533_wq_tg_get_data(struct work_struct *work)
}
#define ATR_REQ_GB_OFFSET 17
-static int pn533_init_target_complete(struct pn533 *dev, void *arg,
- u8 *params, int params_len)
+static int pn533_init_target_complete(struct pn533 *dev, u8 *params, int params_len)
{
struct pn533_cmd_init_target_response *resp;
u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb;
@@ -1402,9 +1404,9 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
if (cur_mod->len == 0) {
del_timer(&dev->listen_timer);
- return pn533_init_target_complete(dev, arg, params, params_len);
+ return pn533_init_target_complete(dev, params, params_len);
} else {
- rc = pn533_start_poll_complete(dev, arg, params, params_len);
+ rc = pn533_start_poll_complete(dev, params, params_len);
if (!rc)
return rc;
}
@@ -2373,9 +2375,9 @@ static int pn533_probe(struct usb_interface *interface,
goto error;
}
- dev->in_frame = kmalloc(dev->in_maxlen, GFP_KERNEL);
+ dev->in_frame = kmalloc(PN533_NORMAL_FRAME_MAX_LEN, GFP_KERNEL);
dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
- dev->out_frame = kmalloc(dev->out_maxlen, GFP_KERNEL);
+ dev->out_frame = kmalloc(PN533_NORMAL_FRAME_MAX_LEN, GFP_KERNEL);
dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->in_frame || !dev->out_frame ||
diff --git a/drivers/nfc/pn544/Makefile b/drivers/nfc/pn544/Makefile
new file mode 100644
index 0000000..7257338
--- /dev/null
+++ b/drivers/nfc/pn544/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for PN544 HCI based NFC driver
+#
+
+obj-$(CONFIG_PN544_HCI_NFC) += pn544_i2c.o
+
+pn544_i2c-y := pn544.o i2c.o
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
new file mode 100644
index 0000000..fb430d8
--- /dev/null
+++ b/drivers/nfc/pn544/i2c.c
@@ -0,0 +1,500 @@
+/*
+ * I2C Link Layer for PN544 HCI based Driver
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/crc-ccitt.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/nfc/pn544.h>
+
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "pn544.h"
+
+#define PN544_I2C_FRAME_HEADROOM 1
+#define PN544_I2C_FRAME_TAILROOM 2
+
+/* framing in HCI mode */
+#define PN544_HCI_I2C_LLC_LEN 1
+#define PN544_HCI_I2C_LLC_CRC 2
+#define PN544_HCI_I2C_LLC_LEN_CRC (PN544_HCI_I2C_LLC_LEN + \
+ PN544_HCI_I2C_LLC_CRC)
+#define PN544_HCI_I2C_LLC_MIN_SIZE (1 + PN544_HCI_I2C_LLC_LEN_CRC)
+#define PN544_HCI_I2C_LLC_MAX_PAYLOAD 29
+#define PN544_HCI_I2C_LLC_MAX_SIZE (PN544_HCI_I2C_LLC_LEN_CRC + 1 + \
+ PN544_HCI_I2C_LLC_MAX_PAYLOAD)
+
+static struct i2c_device_id pn544_hci_i2c_id_table[] = {
+ {"pn544", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
+
+#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
+
+struct pn544_i2c_phy {
+ struct i2c_client *i2c_dev;
+ struct nfc_hci_dev *hdev;
+
+ unsigned int gpio_en;
+ unsigned int gpio_irq;
+ unsigned int gpio_fw;
+ unsigned int en_polarity;
+
+ int powered;
+
+ int hard_fault; /*
+ * < 0 if hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+};
+
+#define I2C_DUMP_SKB(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, (skb)->data, (skb)->len, 0); \
+} while (0)
+
+static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
+{
+ int polarity, retry, ret;
+ char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
+ int count = sizeof(rset_cmd);
+
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+ dev_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
+
+ /* Disable fw download */
+ gpio_set_value(phy->gpio_fw, 0);
+
+ for (polarity = 0; polarity < 2; polarity++) {
+ phy->en_polarity = polarity;
+ retry = 3;
+ while (retry--) {
+ /* power off */
+ gpio_set_value(phy->gpio_en, !phy->en_polarity);
+ usleep_range(10000, 15000);
+
+ /* power on */
+ gpio_set_value(phy->gpio_en, phy->en_polarity);
+ usleep_range(10000, 15000);
+
+ /* send reset */
+ dev_dbg(&phy->i2c_dev->dev, "Sending reset cmd\n");
+ ret = i2c_master_send(phy->i2c_dev, rset_cmd, count);
+ if (ret == count) {
+ dev_info(&phy->i2c_dev->dev,
+ "nfc_en polarity : active %s\n",
+ (polarity == 0 ? "low" : "high"));
+ goto out;
+ }
+ }
+ }
+
+ dev_err(&phy->i2c_dev->dev,
+ "Could not detect nfc_en polarity, fallback to active high\n");
+
+out:
+ gpio_set_value(phy->gpio_en, !phy->en_polarity);
+}
+
+static int pn544_hci_i2c_enable(void *phy_id)
+{
+ struct pn544_i2c_phy *phy = phy_id;
+
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+
+ gpio_set_value(phy->gpio_fw, 0);
+ gpio_set_value(phy->gpio_en, phy->en_polarity);
+ usleep_range(10000, 15000);
+
+ phy->powered = 1;
+
+ return 0;
+}
+
+static void pn544_hci_i2c_disable(void *phy_id)
+{
+ struct pn544_i2c_phy *phy = phy_id;
+
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+
+ gpio_set_value(phy->gpio_fw, 0);
+ gpio_set_value(phy->gpio_en, !phy->en_polarity);
+ usleep_range(10000, 15000);
+
+ gpio_set_value(phy->gpio_en, phy->en_polarity);
+ usleep_range(10000, 15000);
+
+ gpio_set_value(phy->gpio_en, !phy->en_polarity);
+ usleep_range(10000, 15000);
+
+ phy->powered = 0;
+}
+
+static void pn544_hci_i2c_add_len_crc(struct sk_buff *skb)
+{
+ u16 crc;
+ int len;
+
+ len = skb->len + 2;
+ *skb_push(skb, 1) = len;
+
+ crc = crc_ccitt(0xffff, skb->data, skb->len);
+ crc = ~crc;
+ *skb_put(skb, 1) = crc & 0xff;
+ *skb_put(skb, 1) = crc >> 8;
+}
+
+static void pn544_hci_i2c_remove_len_crc(struct sk_buff *skb)
+{
+ skb_pull(skb, PN544_I2C_FRAME_HEADROOM);
+ skb_trim(skb, PN544_I2C_FRAME_TAILROOM);
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb
+ */
+static int pn544_hci_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+ int r;
+ struct pn544_i2c_phy *phy = phy_id;
+ struct i2c_client *client = phy->i2c_dev;
+
+ if (phy->hard_fault != 0)
+ return phy->hard_fault;
+
+ usleep_range(3000, 6000);
+
+ pn544_hci_i2c_add_len_crc(skb);
+
+ I2C_DUMP_SKB("i2c frame written", skb);
+
+ r = i2c_master_send(client, skb->data, skb->len);
+
+ if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ usleep_range(6000, 10000);
+ r = i2c_master_send(client, skb->data, skb->len);
+ }
+
+ if (r >= 0) {
+ if (r != skb->len)
+ r = -EREMOTEIO;
+ else
+ r = 0;
+ }
+
+ pn544_hci_i2c_remove_len_crc(skb);
+
+ return r;
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+ int len;
+ u16 crc;
+
+ len = buf[0] + 1;
+ crc = crc_ccitt(0xffff, buf, len - 2);
+ crc = ~crc;
+
+ if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
+ pr_err(PN544_HCI_I2C_DRIVER_NAME
+ ": CRC error 0x%x != 0x%x 0x%x\n",
+ crc, buf[len - 1], buf[len - 2]);
+
+ pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+ print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+ 16, 2, buf, buflen, false);
+ return -EPERM;
+ }
+ return 0;
+}
+
+/*
+ * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
+ * that the i2c bus will be flushed and that the next read will start on a new
+ * frame. The returned skb contains only the LLC header and payload.
+ * returns:
+ * -EREMOTEIO : i2c read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * -ENOMEM : cannot allocate skb, frame dropped
+ */
+static int pn544_hci_i2c_read(struct pn544_i2c_phy *phy, struct sk_buff **skb)
+{
+ int r;
+ u8 len;
+ u8 tmp[PN544_HCI_I2C_LLC_MAX_SIZE - 1];
+ struct i2c_client *client = phy->i2c_dev;
+
+ r = i2c_master_recv(client, &len, 1);
+ if (r != 1) {
+ dev_err(&client->dev, "cannot read len byte\n");
+ return -EREMOTEIO;
+ }
+
+ if ((len < (PN544_HCI_I2C_LLC_MIN_SIZE - 1)) ||
+ (len > (PN544_HCI_I2C_LLC_MAX_SIZE - 1))) {
+ dev_err(&client->dev, "invalid len byte\n");
+ r = -EBADMSG;
+ goto flush;
+ }
+
+ *skb = alloc_skb(1 + len, GFP_KERNEL);
+ if (*skb == NULL) {
+ r = -ENOMEM;
+ goto flush;
+ }
+
+ *skb_put(*skb, 1) = len;
+
+ r = i2c_master_recv(client, skb_put(*skb, len), len);
+ if (r != len) {
+ kfree_skb(*skb);
+ return -EREMOTEIO;
+ }
+
+ I2C_DUMP_SKB("i2c frame read", *skb);
+
+ r = check_crc((*skb)->data, (*skb)->len);
+ if (r != 0) {
+ kfree_skb(*skb);
+ r = -EBADMSG;
+ goto flush;
+ }
+
+ skb_pull(*skb, 1);
+ skb_trim(*skb, (*skb)->len - 2);
+
+ usleep_range(3000, 6000);
+
+ return 0;
+
+flush:
+ if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
+ r = -EREMOTEIO;
+
+ usleep_range(3000, 6000);
+
+ return r;
+}
+
+/*
+ * Reads an shdlc frame from the chip. This is not as straightforward as it
+ * seems. There are cases where we could lose the frame start synchronization.
+ * The frame format is len-data-crc, and corruption can occur anywhere while
+ * the frame is in transit on the i2c bus, such that we could read an invalid len.
+ * In order to recover synchronization with the next frame, we must be sure
+ * to read the real amount of data without using the len byte. We do this by
+ * assuming the following:
+ * - the chip will always present only one single complete frame on the bus
+ * before triggering the interrupt
+ * - the chip will not present a new frame until we have completely read
+ * the previous one (or until we have handled the interrupt).
+ * The tricky case is when we read a corrupted len that is less than the real
+ * len. We must detect this here in order to determine that we need to flush
+ * the bus. This is the reason why we check the crc here.
+ */
+static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+ struct pn544_i2c_phy *phy = phy_id;
+ struct i2c_client *client;
+ struct sk_buff *skb = NULL;
+ int r;
+
+ if (!phy || irq != phy->i2c_dev->irq) {
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+ }
+
+ client = phy->i2c_dev;
+ dev_dbg(&client->dev, "IRQ\n");
+
+ if (phy->hard_fault != 0)
+ return IRQ_HANDLED;
+
+ r = pn544_hci_i2c_read(phy, &skb);
+ if (r == -EREMOTEIO) {
+ phy->hard_fault = r;
+
+ nfc_hci_recv_frame(phy->hdev, NULL);
+
+ return IRQ_HANDLED;
+ } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+ return IRQ_HANDLED;
+ }
+
+ nfc_hci_recv_frame(phy->hdev, skb);
+
+ return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops i2c_phy_ops = {
+ .write = pn544_hci_i2c_write,
+ .enable = pn544_hci_i2c_enable,
+ .disable = pn544_hci_i2c_disable,
+};
+
+static int __devinit pn544_hci_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pn544_i2c_phy *phy;
+ struct pn544_nfc_platform_data *pdata;
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ return -ENODEV;
+ }
+
+ phy = kzalloc(sizeof(struct pn544_i2c_phy), GFP_KERNEL);
+ if (!phy) {
+ dev_err(&client->dev,
+ "Cannot allocate memory for pn544 i2c phy.\n");
+ r = -ENOMEM;
+ goto err_phy_alloc;
+ }
+
+ phy->i2c_dev = client;
+ i2c_set_clientdata(client, phy);
+
+ pdata = client->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
+ r = -EINVAL;
+ goto err_pdata;
+ }
+
+ if (pdata->request_resources == NULL) {
+ dev_err(&client->dev, "request_resources() missing\n");
+ r = -EINVAL;
+ goto err_pdata;
+ }
+
+ r = pdata->request_resources(client);
+ if (r) {
+ dev_err(&client->dev, "Cannot get platform resources\n");
+ goto err_pdata;
+ }
+
+ phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
+ phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
+ phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+
+ pn544_hci_i2c_platform_init(phy);
+
+ r = request_threaded_irq(client->irq, NULL, pn544_hci_i2c_irq_thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ PN544_HCI_I2C_DRIVER_NAME, phy);
+ if (r < 0) {
+ dev_err(&client->dev, "Unable to register IRQ handler\n");
+ goto err_rti;
+ }
+
+ r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+ PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM,
+ PN544_HCI_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
+ if (r < 0)
+ goto err_hci;
+
+ return 0;
+
+err_hci:
+ free_irq(client->irq, phy);
+
+err_rti:
+ if (pdata->free_resources != NULL)
+ pdata->free_resources();
+
+err_pdata:
+ kfree(phy);
+
+err_phy_alloc:
+ return r;
+}
+
+static __devexit int pn544_hci_i2c_remove(struct i2c_client *client)
+{
+ struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
+ struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ pn544_hci_remove(phy->hdev);
+
+ if (phy->powered)
+ pn544_hci_i2c_disable(phy);
+
+ free_irq(client->irq, phy);
+ if (pdata->free_resources)
+ pdata->free_resources();
+
+ kfree(phy);
+
+ return 0;
+}
+
+static struct i2c_driver pn544_hci_i2c_driver = {
+ .driver = {
+ .name = PN544_HCI_I2C_DRIVER_NAME,
+ },
+ .probe = pn544_hci_i2c_probe,
+ .id_table = pn544_hci_i2c_id_table,
+ .remove = __devexit_p(pn544_hci_i2c_remove),
+};
+
+static int __init pn544_hci_i2c_init(void)
+{
+ int r;
+
+ pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+ r = i2c_add_driver(&pn544_hci_i2c_driver);
+ if (r) {
+ pr_err(PN544_HCI_I2C_DRIVER_NAME
+ ": driver registration failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static void __exit pn544_hci_i2c_exit(void)
+{
+ i2c_del_driver(&pn544_hci_i2c_driver);
+}
+
+module_init(pn544_hci_i2c_init);
+module_exit(pn544_hci_i2c_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
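
The new i2c.c above frames every HCI payload as len-data-crc: pn544_hci_i2c_add_len_crc() prepends a length byte covering the payload plus the two CRC bytes, then appends the bit-inverted CRC-CCITT of the length byte and payload, low byte first, and check_crc() validates the same layout on receive. The host-side sketch below only illustrates that framing; it is not part of the patch, and the bitwise crc_ccitt() here is assumed to be equivalent to the kernel's lib/crc-ccitt helper (polynomial 0x8408, LSB first).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-CCITT (poly 0x8408, LSB first), assumed equivalent to lib/crc-ccitt. */
static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= *buf++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
}

/* Build a PN544 I2C LLC frame: [len][payload][~crc lo][~crc hi]. */
static size_t pn544_frame(const uint8_t *payload, size_t plen, uint8_t *out)
{
        uint16_t crc;

        out[0] = plen + 2;                      /* length byte covers payload + CRC */
        memcpy(out + 1, payload, plen);
        crc = ~crc_ccitt(0xffff, out, plen + 1);        /* over len byte + payload */
        out[plen + 1] = crc & 0xff;             /* low byte first */
        out[plen + 2] = crc >> 8;
        return plen + 3;
}

int main(void)
{
        uint8_t payload[] = { 0x81, 0x03 };     /* arbitrary example bytes */
        uint8_t frame[32];
        size_t n = pn544_frame(payload, sizeof(payload), frame);

        for (size_t i = 0; i < n; i++)
                printf("%02x ", frame[i]);
        printf("\n");
        return 0;
}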
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544/pn544.c
index c9c8570..cc666de 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -18,47 +18,21 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/crc-ccitt.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/miscdevice.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
#include <linux/nfc.h>
#include <net/nfc/hci.h>
#include <net/nfc/llc.h>
-#include <linux/nfc/pn544.h>
-
-#define DRIVER_DESC "HCI NFC driver for PN544"
-
-#define PN544_HCI_DRIVER_NAME "pn544_hci"
+#include "pn544.h"
/* Timing restrictions (ms) */
#define PN544_HCI_RESETVEN_TIME 30
-static struct i2c_device_id pn544_hci_id_table[] = {
- {"pn544", 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, pn544_hci_id_table);
-
#define HCI_MODE 0
#define FW_MODE 1
-/* framing in HCI mode */
-#define PN544_HCI_LLC_LEN 1
-#define PN544_HCI_LLC_CRC 2
-#define PN544_HCI_LLC_LEN_CRC (PN544_HCI_LLC_LEN + PN544_HCI_LLC_CRC)
-#define PN544_HCI_LLC_MIN_SIZE (1 + PN544_HCI_LLC_LEN_CRC)
-#define PN544_HCI_LLC_MAX_PAYLOAD 29
-#define PN544_HCI_LLC_MAX_SIZE (PN544_HCI_LLC_LEN_CRC + 1 + \
- PN544_HCI_LLC_MAX_PAYLOAD)
-
enum pn544_state {
PN544_ST_COLD,
PN544_ST_FW_READY,
@@ -100,6 +74,10 @@ enum pn544_state {
#define PN544_SYS_MGMT_INFO_NOTIFICATION 0x02
#define PN544_POLLING_LOOP_MGMT_GATE 0x94
+#define PN544_DEP_MODE 0x01
+#define PN544_DEP_ATR_REQ 0x02
+#define PN544_DEP_ATR_RES 0x03
+#define PN544_DEP_MERGE 0x0D
#define PN544_PL_RDPHASES 0x06
#define PN544_PL_EMULATION 0x07
#define PN544_PL_NFCT_DEACTIVATED 0x09
@@ -108,6 +86,15 @@ enum pn544_state {
#define PN544_NFC_WI_MGMT_GATE 0xA1
+#define PN544_HCI_EVT_SND_DATA 0x01
+#define PN544_HCI_EVT_ACTIVATED 0x02
+#define PN544_HCI_EVT_DEACTIVATED 0x03
+#define PN544_HCI_EVT_RCV_DATA 0x04
+#define PN544_HCI_EVT_CONTINUE_MI 0x05
+
+#define PN544_HCI_CMD_ATTREQUEST 0x12
+#define PN544_HCI_CMD_CONTINUE_ACTIVATION 0x13
+
static struct nfc_hci_gate pn544_gates[] = {
{NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
{NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
@@ -128,259 +115,22 @@ static struct nfc_hci_gate pn544_gates[] = {
/* Largest headroom needed for outgoing custom commands */
#define PN544_CMDS_HEADROOM 2
-#define PN544_FRAME_HEADROOM 1
-#define PN544_FRAME_TAILROOM 2
struct pn544_hci_info {
- struct i2c_client *i2c_dev;
+ struct nfc_phy_ops *phy_ops;
+ void *phy_id;
+
struct nfc_hci_dev *hdev;
enum pn544_state state;
struct mutex info_lock;
- unsigned int gpio_en;
- unsigned int gpio_irq;
- unsigned int gpio_fw;
- unsigned int en_polarity;
-
- int hard_fault; /*
- * < 0 if hardware error occured (e.g. i2c err)
- * and prevents normal operation.
- */
int async_cb_type;
data_exchange_cb_t async_cb;
void *async_cb_context;
};
-static void pn544_hci_platform_init(struct pn544_hci_info *info)
-{
- int polarity, retry, ret;
- char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
- int count = sizeof(rset_cmd);
-
- pr_info(DRIVER_DESC ": %s\n", __func__);
- dev_info(&info->i2c_dev->dev, "Detecting nfc_en polarity\n");
-
- /* Disable fw download */
- gpio_set_value(info->gpio_fw, 0);
-
- for (polarity = 0; polarity < 2; polarity++) {
- info->en_polarity = polarity;
- retry = 3;
- while (retry--) {
- /* power off */
- gpio_set_value(info->gpio_en, !info->en_polarity);
- usleep_range(10000, 15000);
-
- /* power on */
- gpio_set_value(info->gpio_en, info->en_polarity);
- usleep_range(10000, 15000);
-
- /* send reset */
- dev_dbg(&info->i2c_dev->dev, "Sending reset cmd\n");
- ret = i2c_master_send(info->i2c_dev, rset_cmd, count);
- if (ret == count) {
- dev_info(&info->i2c_dev->dev,
- "nfc_en polarity : active %s\n",
- (polarity == 0 ? "low" : "high"));
- goto out;
- }
- }
- }
-
- dev_err(&info->i2c_dev->dev,
- "Could not detect nfc_en polarity, fallback to active high\n");
-
-out:
- gpio_set_value(info->gpio_en, !info->en_polarity);
-}
-
-static int pn544_hci_enable(struct pn544_hci_info *info, int mode)
-{
- pr_info(DRIVER_DESC ": %s\n", __func__);
-
- gpio_set_value(info->gpio_fw, 0);
- gpio_set_value(info->gpio_en, info->en_polarity);
- usleep_range(10000, 15000);
-
- return 0;
-}
-
-static void pn544_hci_disable(struct pn544_hci_info *info)
-{
- pr_info(DRIVER_DESC ": %s\n", __func__);
-
- gpio_set_value(info->gpio_fw, 0);
- gpio_set_value(info->gpio_en, !info->en_polarity);
- usleep_range(10000, 15000);
-
- gpio_set_value(info->gpio_en, info->en_polarity);
- usleep_range(10000, 15000);
-
- gpio_set_value(info->gpio_en, !info->en_polarity);
- usleep_range(10000, 15000);
-}
-
-static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
-{
- int r;
-
- usleep_range(3000, 6000);
-
- r = i2c_master_send(client, buf, len);
-
- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
- usleep_range(6000, 10000);
- r = i2c_master_send(client, buf, len);
- }
-
- if (r >= 0) {
- if (r != len)
- return -EREMOTEIO;
- else
- return 0;
- }
-
- return r;
-}
-
-static int check_crc(u8 *buf, int buflen)
-{
- int len;
- u16 crc;
-
- len = buf[0] + 1;
- crc = crc_ccitt(0xffff, buf, len - 2);
- crc = ~crc;
-
- if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
- pr_err(PN544_HCI_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
- crc, buf[len - 1], buf[len - 2]);
-
- pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
- print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
- 16, 2, buf, buflen, false);
- return -EPERM;
- }
- return 0;
-}
-
-/*
- * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
- * that i2c bus will be flushed and that next read will start on a new frame.
- * returned skb contains only LLC header and payload.
- * returns:
- * -EREMOTEIO : i2c read error (fatal)
- * -EBADMSG : frame was incorrect and discarded
- * -ENOMEM : cannot allocate skb, frame dropped
- */
-static int pn544_hci_i2c_read(struct i2c_client *client, struct sk_buff **skb)
-{
- int r;
- u8 len;
- u8 tmp[PN544_HCI_LLC_MAX_SIZE - 1];
-
- r = i2c_master_recv(client, &len, 1);
- if (r != 1) {
- dev_err(&client->dev, "cannot read len byte\n");
- return -EREMOTEIO;
- }
-
- if ((len < (PN544_HCI_LLC_MIN_SIZE - 1)) ||
- (len > (PN544_HCI_LLC_MAX_SIZE - 1))) {
- dev_err(&client->dev, "invalid len byte\n");
- r = -EBADMSG;
- goto flush;
- }
-
- *skb = alloc_skb(1 + len, GFP_KERNEL);
- if (*skb == NULL) {
- r = -ENOMEM;
- goto flush;
- }
-
- *skb_put(*skb, 1) = len;
-
- r = i2c_master_recv(client, skb_put(*skb, len), len);
- if (r != len) {
- kfree_skb(*skb);
- return -EREMOTEIO;
- }
-
- r = check_crc((*skb)->data, (*skb)->len);
- if (r != 0) {
- kfree_skb(*skb);
- r = -EBADMSG;
- goto flush;
- }
-
- skb_pull(*skb, 1);
- skb_trim(*skb, (*skb)->len - 2);
-
- usleep_range(3000, 6000);
-
- return 0;
-
-flush:
- if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
- r = -EREMOTEIO;
-
- usleep_range(3000, 6000);
-
- return r;
-}
-
-/*
- * Reads an shdlc frame from the chip. This is not as straightforward as it
- * seems. There are cases where we could loose the frame start synchronization.
- * The frame format is len-data-crc, and corruption can occur anywhere while
- * transiting on i2c bus, such that we could read an invalid len.
- * In order to recover synchronization with the next frame, we must be sure
- * to read the real amount of data without using the len byte. We do this by
- * assuming the following:
- * - the chip will always present only one single complete frame on the bus
- * before triggering the interrupt
- * - the chip will not present a new frame until we have completely read
- * the previous one (or until we have handled the interrupt).
- * The tricky case is when we read a corrupted len that is less than the real
- * len. We must detect this here in order to determine that we need to flush
- * the bus. This is the reason why we check the crc here.
- */
-static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
-{
- struct pn544_hci_info *info = dev_id;
- struct i2c_client *client;
- struct sk_buff *skb = NULL;
- int r;
-
- if (!info || irq != info->i2c_dev->irq) {
- WARN_ON_ONCE(1);
- return IRQ_NONE;
- }
-
- client = info->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
- if (info->hard_fault != 0)
- return IRQ_HANDLED;
-
- r = pn544_hci_i2c_read(client, &skb);
- if (r == -EREMOTEIO) {
- info->hard_fault = r;
-
- nfc_hci_recv_frame(info->hdev, NULL);
-
- return IRQ_HANDLED;
- } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
- return IRQ_HANDLED;
- }
-
- nfc_hci_recv_frame(info->hdev, skb);
-
- return IRQ_HANDLED;
-}
-
static int pn544_hci_open(struct nfc_hci_dev *hdev)
{
struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
@@ -393,7 +143,7 @@ static int pn544_hci_open(struct nfc_hci_dev *hdev)
goto out;
}
- r = pn544_hci_enable(info, HCI_MODE);
+ r = info->phy_ops->enable(info->phy_id);
if (r == 0)
info->state = PN544_ST_READY;
@@ -412,7 +162,7 @@ static void pn544_hci_close(struct nfc_hci_dev *hdev)
if (info->state == PN544_ST_COLD)
goto out;
- pn544_hci_disable(info);
+ info->phy_ops->disable(info->phy_id);
info->state = PN544_ST_COLD;
@@ -587,40 +337,11 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
return 0;
}
-static void pn544_hci_add_len_crc(struct sk_buff *skb)
-{
- u16 crc;
- int len;
-
- len = skb->len + 2;
- *skb_push(skb, 1) = len;
-
- crc = crc_ccitt(0xffff, skb->data, skb->len);
- crc = ~crc;
- *skb_put(skb, 1) = crc & 0xff;
- *skb_put(skb, 1) = crc >> 8;
-}
-
-static void pn544_hci_remove_len_crc(struct sk_buff *skb)
-{
- skb_pull(skb, PN544_FRAME_HEADROOM);
- skb_trim(skb, PN544_FRAME_TAILROOM);
-}
-
static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
{
struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
- struct i2c_client *client = info->i2c_dev;
- int r;
- if (info->hard_fault != 0)
- return info->hard_fault;
-
- pn544_hci_add_len_crc(skb);
- r = pn544_hci_i2c_write(client, skb->data, skb->len);
- pn544_hci_remove_len_crc(skb);
-
- return r;
+ return info->phy_ops->write(info->phy_id, skb);
}
static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
@@ -630,6 +351,9 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
int r;
u8 duration[2];
u8 activated;
+ u8 i_mode = 0x3f; /* Enable all supported modes */
+ u8 t_mode = 0x0f;
+ u8 t_merge = 0x01; /* Enable merge by default */
pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
__func__, im_protocols, tm_protocols);
@@ -667,6 +391,61 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
if (r < 0)
return r;
+ if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
+ hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
+ &hdev->gb_len);
+ pr_debug("generate local bytes %p", hdev->gb);
+ if (hdev->gb == NULL || hdev->gb_len == 0) {
+ im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+ tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
+ }
+ }
+
+ if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ r = nfc_hci_send_event(hdev,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_set_param(hdev,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ PN544_DEP_MODE, &i_mode, 1);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_set_param(hdev,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ PN544_DEP_ATR_REQ, hdev->gb, hdev->gb_len);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_send_event(hdev,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+ if (r < 0)
+ nfc_hci_send_event(hdev,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ }
+
+ if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+ PN544_DEP_MODE, &t_mode, 1);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+ PN544_DEP_ATR_RES, hdev->gb, hdev->gb_len);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+ PN544_DEP_MERGE, &t_merge, 1);
+ if (r < 0)
+ return r;
+ }
+
r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
if (r < 0)
@@ -676,6 +455,43 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
return r;
}
+static int pn544_hci_dep_link_up(struct nfc_hci_dev *hdev,
+ struct nfc_target *target, u8 comm_mode,
+ u8 *gb, size_t gb_len)
+{
+ struct sk_buff *rgb_skb = NULL;
+ int r;
+
+ r = nfc_hci_get_param(hdev, target->hci_reader_gate,
+ PN544_DEP_ATR_RES, &rgb_skb);
+ if (r < 0)
+ return r;
+
+ if (rgb_skb->len == 0 || rgb_skb->len > NFC_GB_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+ print_hex_dump(KERN_DEBUG, "remote gb: ", DUMP_PREFIX_OFFSET,
+ 16, 1, rgb_skb->data, rgb_skb->len, true);
+
+ r = nfc_set_remote_general_bytes(hdev->ndev, rgb_skb->data,
+ rgb_skb->len);
+
+ if (r == 0)
+ r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode,
+ NFC_RF_INITIATOR);
+exit:
+ kfree_skb(rgb_skb);
+ return r;
+}
+
+static int pn544_hci_dep_link_down(struct nfc_hci_dev *hdev)
+{
+
+ return nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+}
+
static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
struct nfc_target *target)
{
@@ -687,6 +503,9 @@ static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
target->supported_protocols = NFC_PROTO_JEWEL_MASK;
target->sens_res = 0x0c00;
break;
+ case PN544_RF_READER_NFCIP1_INITIATOR_GATE:
+ target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ break;
default:
return -EPROTO;
}
@@ -701,7 +520,18 @@ static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
struct sk_buff *uid_skb;
int r = 0;
- if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+ if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE)
+ return r;
+
+ if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ r = nfc_hci_send_cmd(hdev,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ PN544_HCI_CMD_CONTINUE_ACTIVATION, NULL, 0, NULL);
+ if (r < 0)
+ return r;
+
+ target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE;
+ } else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
target->nfcid1_len != 10)
return -EPROTO;
@@ -724,6 +554,16 @@ static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
PN544_RF_READER_CMD_ACTIVATE_NEXT,
uid_skb->data, uid_skb->len, NULL);
kfree_skb(uid_skb);
+
+ r = nfc_hci_send_cmd(hdev,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ PN544_HCI_CMD_CONTINUE_ACTIVATION,
+ NULL, 0, NULL);
+ if (r < 0)
+ return r;
+
+ target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE;
+ target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
} else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) {
/*
* TODO: maybe other ISO 14443 require some kind of continue
@@ -769,7 +609,7 @@ static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
* <= 0: driver handled the data exchange
* 1: driver doesn't especially handle, please do standard processing
*/
-static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev,
+static int pn544_hci_im_transceive(struct nfc_hci_dev *hdev,
struct nfc_target *target,
struct sk_buff *skb, data_exchange_cb_t cb,
void *cb_context)
@@ -822,17 +662,110 @@ static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev,
return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
PN544_JEWEL_RAW_CMD, skb->data,
skb->len, cb, cb_context);
+ case PN544_RF_READER_NFCIP1_INITIATOR_GATE:
+ *skb_push(skb, 1) = 0;
+
+ return nfc_hci_send_event(hdev, target->hci_reader_gate,
+ PN544_HCI_EVT_SND_DATA, skb->data,
+ skb->len);
default:
return 1;
}
}
+static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Set default false for multiple information chaining */
+ *skb_push(skb, 1) = 0;
+
+ return nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
+ PN544_HCI_EVT_SND_DATA, skb->data, skb->len);
+}
+
static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
struct nfc_target *target)
{
- return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
- PN544_RF_READER_CMD_PRESENCE_CHECK,
- NULL, 0, NULL);
+ pr_debug("supported protocol %d", target->supported_protocols);
+ if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK |
+ NFC_PROTO_ISO14443_B_MASK)) {
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_RF_READER_CMD_PRESENCE_CHECK,
+ NULL, 0, NULL);
+ } else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+ if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
+ target->nfcid1_len != 10)
+ return -EOPNOTSUPP;
+
+ return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ PN544_RF_READER_CMD_ACTIVATE_NEXT,
+ target->nfcid1, target->nfcid1_len, NULL);
+ } else if (target->supported_protocols & NFC_PROTO_JEWEL_MASK) {
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_JEWEL_RAW_CMD, NULL, 0, NULL);
+ } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) {
+ return nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
+ PN544_FELICA_RAW, NULL, 0, NULL);
+ } else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_HCI_CMD_ATTREQUEST,
+ NULL, 0, NULL);
+ }
+
+ return 0;
+}
+
+static void pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
+ u8 event, struct sk_buff *skb)
+{
+ struct sk_buff *rgb_skb = NULL;
+ int r = 0;
+
+ pr_debug("hci event %d", event);
+ switch (event) {
+ case PN544_HCI_EVT_ACTIVATED:
+ if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE)
+ nfc_hci_target_discovered(hdev, gate);
+ else if (gate == PN544_RF_READER_NFCIP1_TARGET_GATE) {
+ r = nfc_hci_get_param(hdev, gate, PN544_DEP_ATR_REQ,
+ &rgb_skb);
+
+ if (r < 0)
+ goto exit;
+
+ nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
+ NFC_COMM_PASSIVE, rgb_skb->data,
+ rgb_skb->len);
+
+ kfree_skb(rgb_skb);
+ }
+
+ break;
+ case PN544_HCI_EVT_DEACTIVATED:
+ nfc_hci_send_event(hdev, gate,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ break;
+ case PN544_HCI_EVT_RCV_DATA:
+ if (skb->len < 2) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ if (skb->data[0] != 0) {
+ pr_debug("data0 %d", skb->data[0]);
+ r = -EPROTO;
+ goto exit;
+ }
+
+ skb_pull(skb, 2);
+ nfc_tm_data_received(hdev->ndev, skb);
+
+ return;
+ default:
+ break;
+ }
+
+exit:
+ kfree_skb(skb);
}
static struct nfc_hci_ops pn544_hci_ops = {
@@ -841,74 +774,36 @@ static struct nfc_hci_ops pn544_hci_ops = {
.hci_ready = pn544_hci_ready,
.xmit = pn544_hci_xmit,
.start_poll = pn544_hci_start_poll,
+ .dep_link_up = pn544_hci_dep_link_up,
+ .dep_link_down = pn544_hci_dep_link_down,
.target_from_gate = pn544_hci_target_from_gate,
.complete_target_discovered = pn544_hci_complete_target_discovered,
- .data_exchange = pn544_hci_data_exchange,
+ .im_transceive = pn544_hci_im_transceive,
+ .tm_send = pn544_hci_tm_send,
.check_presence = pn544_hci_check_presence,
+ .event_received = pn544_hci_event_received,
};
-static int __devinit pn544_hci_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
+ int phy_headroom, int phy_tailroom, int phy_payload,
+ struct nfc_hci_dev **hdev)
{
struct pn544_hci_info *info;
- struct pn544_nfc_platform_data *pdata;
- int r = 0;
u32 protocols;
struct nfc_hci_init_data init_data;
-
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
- return -ENODEV;
- }
+ int r;
info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
if (!info) {
- dev_err(&client->dev,
- "Cannot allocate memory for pn544_hci_info.\n");
+ pr_err("Cannot allocate memory for pn544_hci_info.\n");
r = -ENOMEM;
goto err_info_alloc;
}
- info->i2c_dev = client;
+ info->phy_ops = phy_ops;
+ info->phy_id = phy_id;
info->state = PN544_ST_COLD;
mutex_init(&info->info_lock);
- i2c_set_clientdata(client, info);
-
- pdata = client->dev.platform_data;
- if (pdata == NULL) {
- dev_err(&client->dev, "No platform data\n");
- r = -EINVAL;
- goto err_pdata;
- }
-
- if (pdata->request_resources == NULL) {
- dev_err(&client->dev, "request_resources() missing\n");
- r = -EINVAL;
- goto err_pdata;
- }
-
- r = pdata->request_resources(client);
- if (r) {
- dev_err(&client->dev, "Cannot get platform resources\n");
- goto err_pdata;
- }
-
- info->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
- info->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
- info->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
-
- pn544_hci_platform_init(info);
-
- r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- PN544_HCI_DRIVER_NAME, info);
- if (r < 0) {
- dev_err(&client->dev, "Unable to register IRQ handler\n");
- goto err_rti;
- }
init_data.gate_count = ARRAY_SIZE(pn544_gates);
@@ -928,13 +823,11 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
NFC_PROTO_NFC_DEP_MASK;
info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
- protocols, LLC_SHDLC_NAME,
- PN544_FRAME_HEADROOM +
- PN544_CMDS_HEADROOM,
- PN544_FRAME_TAILROOM,
- PN544_HCI_LLC_MAX_PAYLOAD);
+ protocols, llc_name,
+ phy_headroom + PN544_CMDS_HEADROOM,
+ phy_tailroom, phy_payload);
if (!info->hdev) {
- dev_err(&client->dev, "Cannot allocate nfc hdev.\n");
+ pr_err("Cannot allocate nfc hdev.\n");
r = -ENOMEM;
goto err_alloc_hdev;
}
@@ -945,79 +838,25 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
if (r)
goto err_regdev;
+ *hdev = info->hdev;
+
return 0;
err_regdev:
nfc_hci_free_device(info->hdev);
err_alloc_hdev:
- free_irq(client->irq, info);
-
-err_rti:
- if (pdata->free_resources != NULL)
- pdata->free_resources();
-
-err_pdata:
kfree(info);
err_info_alloc:
return r;
}
-static __devexit int pn544_hci_remove(struct i2c_client *client)
+void pn544_hci_remove(struct nfc_hci_dev *hdev)
{
- struct pn544_hci_info *info = i2c_get_clientdata(client);
- struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
-
- dev_dbg(&client->dev, "%s\n", __func__);
-
- nfc_hci_free_device(info->hdev);
-
- if (info->state != PN544_ST_COLD) {
- if (pdata->disable)
- pdata->disable();
- }
-
- free_irq(client->irq, info);
- if (pdata->free_resources)
- pdata->free_resources();
+ struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+ nfc_hci_unregister_device(hdev);
+ nfc_hci_free_device(hdev);
kfree(info);
-
- return 0;
}
-
-static struct i2c_driver pn544_hci_driver = {
- .driver = {
- .name = PN544_HCI_DRIVER_NAME,
- },
- .probe = pn544_hci_probe,
- .id_table = pn544_hci_id_table,
- .remove = __devexit_p(pn544_hci_remove),
-};
-
-static int __init pn544_hci_init(void)
-{
- int r;
-
- pr_debug(DRIVER_DESC ": %s\n", __func__);
-
- r = i2c_add_driver(&pn544_hci_driver);
- if (r) {
- pr_err(PN544_HCI_DRIVER_NAME ": driver registration failed\n");
- return r;
- }
-
- return 0;
-}
-
-static void __exit pn544_hci_exit(void)
-{
- i2c_del_driver(&pn544_hci_driver);
-}
-
-module_init(pn544_hci_init);
-module_exit(pn544_hci_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
new file mode 100644
index 0000000..f47c645
--- /dev/null
+++ b/drivers/nfc/pn544/pn544.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LOCAL_PN544_H_
+#define __LOCAL_PN544_H_
+
+#include <net/nfc/hci.h>
+
+#define DRIVER_DESC "HCI NFC driver for PN544"
+
+int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
+ int phy_headroom, int phy_tailroom, int phy_payload,
+ struct nfc_hci_dev **hdev);
+void pn544_hci_remove(struct nfc_hci_dev *hdev);
+
+#endif /* __LOCAL_PN544_H_ */
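
Since pn544.h now exports pn544_hci_probe() and pn544_hci_remove(), any link layer able to move LLC frames can reuse the HCI core the same way i2c.c does above: fill a struct nfc_phy_ops, register with pn544_hci_probe(), and deliver inbound frames through nfc_hci_recv_frame(). The sketch below shows that pattern for a hypothetical non-I2C phy; the pn544_spi_* names, the empty bus plumbing, and the headroom/tailroom/payload values (copied from the I2C defines) are illustrative assumptions, not part of this patch.

/* Hypothetical alternative link layer for the pn544 HCI core (sketch only). */
#include <linux/skbuff.h>
#include <net/nfc/hci.h>
#include <net/nfc/llc.h>

#include "pn544.h"

struct pn544_spi_phy {                  /* invented for this sketch */
        struct nfc_hci_dev *hdev;
        void *link;                     /* bus-specific handle */
        int powered;
};

static int pn544_spi_enable(void *phy_id)
{
        struct pn544_spi_phy *phy = phy_id;

        /* power the chip up over the real bus here */
        phy->powered = 1;
        return 0;
}

static void pn544_spi_disable(void *phy_id)
{
        struct pn544_spi_phy *phy = phy_id;

        phy->powered = 0;
}

static int pn544_spi_write(void *phy_id, struct sk_buff *skb)
{
        /* add link-layer framing, push skb->data/skb->len on the bus;
         * return 0 on success or a negative errno, and leave the skb intact.
         * Inbound frames would be handed to nfc_hci_recv_frame(phy->hdev, skb),
         * as the i2c irq handler above does. */
        return 0;
}

static struct nfc_phy_ops spi_phy_ops = {
        .write = pn544_spi_write,
        .enable = pn544_spi_enable,
        .disable = pn544_spi_disable,
};

static int pn544_spi_register(struct pn544_spi_phy *phy)
{
        /* headroom/tailroom/payload shown here are the I2C values (1, 2, 29);
         * a real link layer would pass its own framing requirements. */
        return pn544_hci_probe(phy, &spi_phy_ops, LLC_SHDLC_NAME,
                               1, 2, 29, &phy->hdev);
}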
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 258ca59..982d16b 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -6,7 +6,6 @@ menu "PPS support"
config PPS
tristate "PPS support"
- depends on EXPERIMENTAL
---help---
PPS (Pulse Per Second) is a special pulse provided by some GPS
antennae. Userland can use it to get a high-precision time
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index ffdf712..70c5836 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -4,13 +4,9 @@
menu "PTP clock support"
-comment "Enable Device Drivers -> PPS to see the PTP clock options."
- depends on PPS=n
-
config PTP_1588_CLOCK
tristate "PTP clock support"
- depends on EXPERIMENTAL
- depends on PPS
+ select PPS
help
The IEEE 1588 standard defines a method to precisely
synchronize distributed clocks over Ethernet networks. The
@@ -29,8 +25,9 @@ config PTP_1588_CLOCK
config PTP_1588_CLOCK_GIANFAR
tristate "Freescale eTSEC as PTP clock"
- depends on PTP_1588_CLOCK
depends on GIANFAR
+ select PTP_1588_CLOCK
+ default y
help
This driver adds support for using the eTSEC as a PTP
clock. This clock is only useful if your PTP programs are
@@ -42,8 +39,9 @@ config PTP_1588_CLOCK_GIANFAR
config PTP_1588_CLOCK_IXP46X
tristate "Intel IXP46x as PTP clock"
- depends on PTP_1588_CLOCK
depends on IXP4XX_ETH
+ select PTP_1588_CLOCK
+ default y
help
This driver adds support for using the IXP46X as a PTP
clock. This clock is only useful if your PTP programs are
@@ -54,13 +52,13 @@ config PTP_1588_CLOCK_IXP46X
will be called ptp_ixp46x.
comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
- depends on PTP_1588_CLOCK && (PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n)
+ depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
config DP83640_PHY
tristate "Driver for the National Semiconductor DP83640 PHYTER"
- depends on PTP_1588_CLOCK
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
+ select PTP_1588_CLOCK
---help---
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -74,8 +72,9 @@ config DP83640_PHY
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
- depends on PTP_1588_CLOCK
depends on PCH_GBE
+ select PTP_1588_CLOCK
+ default y
help
This driver adds support for using the PCH EG20T as a PTP
clock. The hardware supports time stamping of PTP packets
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index e7f301da2..4f8ae80 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -33,9 +33,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
{
struct ptp_clock_caps caps;
struct ptp_clock_request req;
+ struct ptp_sys_offset sysoff;
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock_info *ops = ptp->info;
+ struct ptp_clock_time *pct;
+ struct timespec ts;
int enable, err = 0;
+ unsigned int i;
switch (cmd) {
@@ -88,6 +92,34 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = ops->enable(ops, &req, enable);
break;
+ case PTP_SYS_OFFSET:
+ if (copy_from_user(&sysoff, (void __user *)arg,
+ sizeof(sysoff))) {
+ err = -EFAULT;
+ break;
+ }
+ if (sysoff.n_samples > PTP_MAX_SAMPLES) {
+ err = -EINVAL;
+ break;
+ }
+ pct = &sysoff.ts[0];
+ for (i = 0; i < sysoff.n_samples; i++) {
+ getnstimeofday(&ts);
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ pct++;
+ ptp->info->gettime(ptp->info, &ts);
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ pct++;
+ }
+ getnstimeofday(&ts);
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ if (copy_to_user((void __user *)arg, &sysoff, sizeof(sysoff)))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOTTY;
break;
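
The PTP_SYS_OFFSET handler above fills ts[] with 2 * n_samples + 1 interleaved readings: system time, PHC time, system time, PHC time, ..., ending on a system time, so each device reading is bracketed by two system readings. Userspace can then estimate the clock offset as phc - (t1 + t2) / 2 and the sampling delay as t2 - t1. A minimal sketch follows; the struct ptp_sys_offset / ptp_clock_time layout and the PTP_SYS_OFFSET request come from the matching uapi change, which is not part of this hunk, so treat those names as assumptions.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>    /* struct ptp_sys_offset, PTP_SYS_OFFSET (assumed) */

static int64_t pct_ns(const struct ptp_clock_time *t)
{
        return t->sec * 1000000000LL + t->nsec;
}

int main(void)
{
        struct ptp_sys_offset sysoff = { .n_samples = 5 };
        int fd = open("/dev/ptp0", O_RDWR);

        if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET, &sysoff) < 0) {
                perror("PTP_SYS_OFFSET");
                return 1;
        }

        /* ts[] = sys, phc, sys, phc, ..., sys  (2 * n_samples + 1 entries) */
        for (unsigned int i = 0; i < sysoff.n_samples; i++) {
                int64_t t1  = pct_ns(&sysoff.ts[2 * i]);
                int64_t phc = pct_ns(&sysoff.ts[2 * i + 1]);
                int64_t t2  = pct_ns(&sysoff.ts[2 * i + 2]);

                printf("sample %u: offset %lld ns (delay %lld ns)\n", i,
                       (long long)(phc - (t1 + t2) / 2),
                       (long long)(t2 - t1));
        }
        return 0;
}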
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 266aa16..19396dc 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -37,6 +37,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index b58fef7..d7d5804 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -346,6 +346,8 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, 0x380005C0);
}
break;
+ case 43222:
+ break;
default:
ssb_printk(KERN_ERR PFX
"ERROR: PLL init unknown for device %04X\n",
@@ -434,6 +436,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
min_msk = 0xCBB;
break;
case 0x4322:
+ case 43222:
/* We keep the default settings:
* min_msk = 0xCBB
* max_msk = 0x7FFFF
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index c625086..b918ba9 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -192,9 +192,10 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
/* When there is no chipcommon on the bus there is 4MB flash */
if (!bus->chipco.dev) {
- mcore->flash_buswidth = 2;
- mcore->flash_window = SSB_FLASH1;
- mcore->flash_window_size = SSB_FLASH1_SZ;
+ mcore->pflash.present = true;
+ mcore->pflash.buswidth = 2;
+ mcore->pflash.window = SSB_FLASH1;
+ mcore->pflash.window_size = SSB_FLASH1_SZ;
return;
}
@@ -206,13 +207,14 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
break;
case SSB_CHIPCO_FLASHT_PARA:
pr_debug("Found parallel flash\n");
- mcore->flash_window = SSB_FLASH2;
- mcore->flash_window_size = SSB_FLASH2_SZ;
+ mcore->pflash.present = true;
+ mcore->pflash.window = SSB_FLASH2;
+ mcore->pflash.window_size = SSB_FLASH2_SZ;
if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
& SSB_CHIPCO_CFG_DS16) == 0)
- mcore->flash_buswidth = 1;
+ mcore->pflash.buswidth = 1;
else
- mcore->flash_buswidth = 2;
+ mcore->pflash.buswidth = 2;
break;
}
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7f93f34..67898fa 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -42,6 +42,21 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
+/*
+ * For transmit, used buffer len is unused; we override it to track buffer
+ * status internally; used for zerocopy tx only.
+ */
+/* Lower device DMA failed */
+#define VHOST_DMA_FAILED_LEN 3
+/* Lower device DMA done */
+#define VHOST_DMA_DONE_LEN 2
+/* Lower device DMA in progress */
+#define VHOST_DMA_IN_PROGRESS 1
+/* Buffer unused */
+#define VHOST_DMA_CLEAR_LEN 0
+
+#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
+
enum {
VHOST_NET_VQ_RX = 0,
VHOST_NET_VQ_TX = 1,
@@ -62,8 +77,33 @@ struct vhost_net {
* We only do this when socket buffer fills up.
* Protected by tx vq lock. */
enum vhost_net_poll_state tx_poll_state;
+ /* Number of TX recently submitted.
+ * Protected by tx vq lock. */
+ unsigned tx_packets;
+ /* Number of times zerocopy TX recently failed.
+ * Protected by tx vq lock. */
+ unsigned tx_zcopy_err;
};
+static void vhost_net_tx_packet(struct vhost_net *net)
+{
+ ++net->tx_packets;
+ if (net->tx_packets < 1024)
+ return;
+ net->tx_packets = 0;
+ net->tx_zcopy_err = 0;
+}
+
+static void vhost_net_tx_err(struct vhost_net *net)
+{
+ ++net->tx_zcopy_err;
+}
+
+static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
+{
+ return net->tx_packets / 64 >= net->tx_zcopy_err;
+}
+
static bool vhost_sock_zcopy(struct socket *sock)
{
return unlikely(experimental_zcopytx) &&
@@ -126,6 +166,55 @@ static void tx_poll_start(struct vhost_net *net, struct socket *sock)
net->tx_poll_state = VHOST_NET_POLL_STARTED;
}
+/* The lower device driver may complete DMA out of order for some reason.
+ * upend_idx is used to track the end of the used idx, done_idx is used to
+ * track the head of the used idx. Once the lower device has completed DMA
+ * contiguously, we signal the KVM guest's used idx.
+ */
+static int vhost_zerocopy_signal_used(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
+{
+ int i;
+ int j = 0;
+
+ for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
+ if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
+ vhost_net_tx_err(net);
+ if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
+ vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
+ vhost_add_used_and_signal(vq->dev, vq,
+ vq->heads[i].id, 0);
+ ++j;
+ } else
+ break;
+ }
+ if (j)
+ vq->done_idx = i;
+ return j;
+}
+
+static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+{
+ struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+ struct vhost_virtqueue *vq = ubufs->vq;
+ int cnt = atomic_read(&ubufs->kref.refcount);
+
+ /*
+ * Trigger polling thread if guest stopped submitting new buffers:
+ * in this case, the refcount after decrement will eventually reach 1
+ * so here it is 2.
+ * We also trigger polling periodically after every 16 packets
+ * (the value 16 here is more or less arbitrary, it's tuned to trigger
+ * less than 10% of the time).
+ */
+ if (cnt <= 2 || !(cnt % 16))
+ vhost_poll_queue(&vq->poll);
+ /* set len to mark this descriptor's buffers as DMA done */
+ vq->heads[ubuf->desc].len = success ?
+ VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+ vhost_ubuf_put(ubufs);
+}
+
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
@@ -172,7 +261,7 @@ static void handle_tx(struct vhost_net *net)
for (;;) {
/* Release DMAs done buffers first */
if (zcopy)
- vhost_zerocopy_signal_used(vq);
+ vhost_zerocopy_signal_used(net, vq);
head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
ARRAY_SIZE(vq->iov),
@@ -227,7 +316,8 @@ static void handle_tx(struct vhost_net *net)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy) {
vq->heads[vq->upend_idx].id = head;
- if (len < VHOST_GOODCOPY_LEN) {
+ if (!vhost_net_tx_select_zcopy(net) ||
+ len < VHOST_GOODCOPY_LEN) {
/* copy don't need to wait for DMA done */
vq->heads[vq->upend_idx].len =
VHOST_DMA_DONE_LEN;
@@ -237,7 +327,8 @@ static void handle_tx(struct vhost_net *net)
} else {
struct ubuf_info *ubuf = &vq->ubuf_info[head];
- vq->heads[vq->upend_idx].len = len;
+ vq->heads[vq->upend_idx].len =
+ VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = vq->ubufs;
ubuf->desc = vq->upend_idx;
@@ -268,8 +359,9 @@ static void handle_tx(struct vhost_net *net)
if (!zcopy)
vhost_add_used_and_signal(&net->dev, vq, head, 0);
else
- vhost_zerocopy_signal_used(vq);
+ vhost_zerocopy_signal_used(net, vq);
total_len += len;
+ vhost_net_tx_packet(net);
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
break;
@@ -594,9 +686,18 @@ static int vhost_net_release(struct inode *inode, struct file *f)
struct vhost_net *n = f->private_data;
struct socket *tx_sock;
struct socket *rx_sock;
+ int i;
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
+ for (i = 0; i < n->dev.nvqs; ++i) {
+ /* Wait for all lower device DMAs to finish. */
+ if (n->dev.vqs[i].ubufs)
+ vhost_ubuf_put_and_wait(n->dev.vqs[i].ubufs);
+
+ vhost_zerocopy_signal_used(n, &n->dev.vqs[i]);
+ }
vhost_dev_cleanup(&n->dev, false);
if (tx_sock)
fput(tx_sock->file);
@@ -729,7 +830,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
if (oldubufs) {
vhost_ubuf_put_and_wait(oldubufs);
mutex_lock(&vq->mutex);
- vhost_zerocopy_signal_used(vq);
+ vhost_zerocopy_signal_used(n, vq);
mutex_unlock(&vq->mutex);
}
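
For context, the selection logic added above boils down to a ratio test: over a window of at most 1024 packets, zerocopy stays enabled only while DMA failures amount to fewer than roughly one in 64 transmitted packets. The following standalone sketch reproduces that accounting outside the kernel; the field names mirror the patch, but the struct, the harness and the sample numbers are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the counters the patch adds to struct vhost_net. */
struct tx_stats {
	unsigned int tx_packets;   /* packets sent since the last window reset */
	unsigned int tx_zcopy_err; /* zerocopy DMA failures in that window */
};

/* Mirrors vhost_net_tx_packet(): reset the window every 1024 packets. */
static void tx_packet(struct tx_stats *s)
{
	if (++s->tx_packets < 1024)
		return;
	s->tx_packets = 0;
	s->tx_zcopy_err = 0;
}

/* Mirrors vhost_net_tx_select_zcopy(): keep zerocopy while errors stay
 * below roughly 1/64 of the packets in the current window. */
static bool select_zcopy(const struct tx_stats *s)
{
	return s->tx_packets / 64 >= s->tx_zcopy_err;
}

int main(void)
{
	struct tx_stats s = { 0, 0 };
	int i;

	for (i = 0; i < 256; i++)
		tx_packet(&s);

	s.tx_zcopy_err = 3;
	printf("zcopy=%d\n", select_zcopy(&s)); /* 256/64 = 4 >= 3: still on */

	s.tx_zcopy_err = 5;
	printf("zcopy=%d\n", select_zcopy(&s)); /* 4 >= 5 is false: fall back */
	return 0;
}

With 256 packets in the current window, up to 256/64 = 4 errors still keep zerocopy selected; a fifth error disables it until the window resets.
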
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index aa31692..23c138f 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -895,6 +895,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
vhost_scsi_clear_endpoint(s, &backend);
}
+ vhost_dev_stop(&s->dev);
vhost_dev_cleanup(&s->dev, false);
kfree(s);
return 0;
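
The callback added to net.c above wakes the transmit handler sparingly. As its comment implies, each in-flight zerocopy buffer holds one reference on the ubufs object on top of a base reference, and the callback samples the count before dropping its own reference; it only queues polling when the sample suggests the queue has almost drained (2 or less) or on every 16th completion. A toy userspace model of that condition follows; the plain int standing in for the kref and the 20-buffer scenario are invented for illustration.

#include <stdio.h>

/* One base reference plus one per in-flight zerocopy buffer
 * (a plain int standing in for the kref on vhost_ubuf_ref). */
static int refcount = 1;

static void poll_queue(int cnt)
{
	printf("kick tx handler (sampled refcount %d)\n", cnt);
}

/* Models vhost_zerocopy_callback(): sample the count, decide whether to
 * wake the tx handler, then drop this buffer's reference. */
static void zerocopy_done(void)
{
	int cnt = refcount; /* sampled before the put, as in the patch */

	if (cnt <= 2 || !(cnt % 16))
		poll_queue(cnt);
	refcount--;
}

int main(void)
{
	int i;

	refcount += 20; /* 20 buffers in flight: refcount = 1 + 20 */
	for (i = 0; i < 20; i++)
		zerocopy_done();
	/* Only two wakeups happen: one at the 16-count mark and one when
	 * the queue is nearly drained (sampled count 2). */
	return 0;
}
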
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 99ac2cb..ef8f598 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -26,10 +26,6 @@
#include <linux/kthread.h>
#include <linux/cgroup.h>
-#include <linux/net.h>
-#include <linux/if_packet.h>
-#include <linux/if_arp.h>
-
#include "vhost.h"
enum {
@@ -414,28 +410,16 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
return 0;
}
-/* In case of DMA done not in order in lower device driver for some reason.
- * upend_idx is used to track end of used idx, done_idx is used to track head
- * of used idx. Once lower device DMA done contiguously, we will signal KVM
- * guest used idx.
- */
-int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
+void vhost_dev_stop(struct vhost_dev *dev)
{
int i;
- int j = 0;
-
- for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
- if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
- vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
- vhost_add_used_and_signal(vq->dev, vq,
- vq->heads[i].id, 0);
- ++j;
- } else
- break;
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
+ vhost_poll_stop(&dev->vqs[i].poll);
+ vhost_poll_flush(&dev->vqs[i].poll);
+ }
}
- if (j)
- vq->done_idx = i;
- return j;
}
/* Caller should have device mutex if and only if locked is set */
@@ -444,17 +428,6 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
int i;
for (i = 0; i < dev->nvqs; ++i) {
- if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
- vhost_poll_stop(&dev->vqs[i].poll);
- vhost_poll_flush(&dev->vqs[i].poll);
- }
- /* Wait for all lower device DMAs done. */
- if (dev->vqs[i].ubufs)
- vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
-
- /* Signal guest as appropriate. */
- vhost_zerocopy_signal_used(&dev->vqs[i]);
-
if (dev->vqs[i].error_ctx)
eventfd_ctx_put(dev->vqs[i].error_ctx);
if (dev->vqs[i].error)
@@ -1599,14 +1572,3 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
kfree(ubufs);
}
-
-void vhost_zerocopy_callback(struct ubuf_info *ubuf)
-{
- struct vhost_ubuf_ref *ubufs = ubuf->ctx;
- struct vhost_virtqueue *vq = ubufs->vq;
-
- vhost_poll_queue(&vq->poll);
- /* set len = 1 to mark this desc buffers done DMA */
- vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
- kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
-}
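
The hunk above drops the original vhost_zerocopy_signal_used() from the core; the replacement in net.c, shown earlier, keeps the same bookkeeping, which is worth spelling out: lower-device completions can arrive out of order, and only the contiguous run of completed slots between done_idx and upend_idx may be reported to the guest. Below is a small userspace simulation of that window scan; RING_SIZE, the DMA_* markers and the four-buffer scenario are invented for illustration.

#include <stdio.h>

#define RING_SIZE 8        /* stands in for UIO_MAXIOV */
#define DMA_IN_PROGRESS 0
#define DMA_DONE 1

/* Per-slot completion state, mirroring the role of vq->heads[i].len. */
static int heads[RING_SIZE];
static int done_idx, upend_idx;

/* The lower device finishes the DMA for slot i, possibly out of order. */
static void complete(int i)
{
	heads[i] = DMA_DONE;
}

/* Report (here: print) only the contiguous prefix of completed slots,
 * advancing done_idx the way vhost_zerocopy_signal_used() does. */
static void signal_used(void)
{
	int i;

	for (i = done_idx; i != upend_idx; i = (i + 1) % RING_SIZE) {
		if (heads[i] != DMA_DONE)
			break;
		printf("signal slot %d\n", i);
	}
	done_idx = i;
}

int main(void)
{
	int i;

	/* Submit four zerocopy buffers: slots 0..3 are in flight. */
	for (i = 0; i < 4; i++)
		heads[i] = DMA_IN_PROGRESS;
	upend_idx = 4;

	complete(2);
	signal_used(); /* prints nothing: slot 0 is still in flight */

	complete(0);
	complete(1);
	signal_used(); /* prints slots 0, 1, 2; done_idx advances to 3 */
	return 0;
}
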
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 1125af3..5e19e3d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -7,17 +7,11 @@
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
-#include <linux/skbuff.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
-/* This is for zerocopy, used buffer len is set to 1 when lower device DMA
- * done */
-#define VHOST_DMA_DONE_LEN 1
-#define VHOST_DMA_CLEAR_LEN 0
-
struct vhost_device;
struct vhost_work;
@@ -70,6 +64,8 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
void vhost_ubuf_put(struct vhost_ubuf_ref *);
void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);
+struct ubuf_info;
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -167,6 +163,7 @@ long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
+void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);
@@ -191,8 +188,6 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
-void vhost_zerocopy_callback(struct ubuf_info *);
-int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \