-rw-r--r--  Documentation/networking/README.ipw2200  2
-rw-r--r--  Documentation/networking/caif/spi_porting.txt  208
-rw-r--r--  MAINTAINERS  10
-rw-r--r--  arch/microblaze/include/asm/system.h  3
-rw-r--r--  arch/powerpc/include/asm/system.h  3
-rw-r--r--  arch/x86/include/asm/system.h  7
-rw-r--r--  drivers/net/3c59x.c  352
-rw-r--r--  drivers/net/Kconfig  26
-rw-r--r--  drivers/net/Makefile  3
-rw-r--r--  drivers/net/benet/be.h  1
-rw-r--r--  drivers/net/benet/be_cmds.c  37
-rw-r--r--  drivers/net/benet/be_cmds.h  27
-rw-r--r--  drivers/net/benet/be_ethtool.c  55
-rw-r--r--  drivers/net/benet/be_hw.h  2
-rw-r--r--  drivers/net/benet/be_main.c  13
-rw-r--r--  drivers/net/bfin_mac.c  123
-rw-r--r--  drivers/net/bfin_mac.h  5
-rw-r--r--  drivers/net/bnx2.c  11
-rw-r--r--  drivers/net/bnx2x_main.c  3
-rw-r--r--  drivers/net/caif/Kconfig  22
-rw-r--r--  drivers/net/caif/Makefile  14
-rw-r--r--  drivers/net/caif/caif_serial.c  1
-rw-r--r--  drivers/net/caif/caif_spi.c  847
-rw-r--r--  drivers/net/caif/caif_spi_slave.c  252
-rw-r--r--  drivers/net/cnic.c  420
-rw-r--r--  drivers/net/cnic.h  23
-rw-r--r--  drivers/net/cnic_if.h  4
-rw-r--r--  drivers/net/cpmac.c  8
-rw-r--r--  drivers/net/cxgb3/version.h  4
-rw-r--r--  drivers/net/cxgb4/cxgb4.h  6
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c  394
-rw-r--r--  drivers/net/cxgb4/cxgb4_uld.h  2
-rw-r--r--  drivers/net/cxgb4/l2t.c  7
-rw-r--r--  drivers/net/cxgb4/sge.c  4
-rw-r--r--  drivers/net/cxgb4/t4_hw.c  96
-rw-r--r--  drivers/net/cxgb4/t4_hw.h  45
-rw-r--r--  drivers/net/cxgb4/t4_msg.h  14
-rw-r--r--  drivers/net/cxgb4/t4_regs.h  6
-rw-r--r--  drivers/net/cxgb4/t4fw_api.h  61
-rw-r--r--  drivers/net/cxgb4vf/Makefile  7
-rw-r--r--  drivers/net/cxgb4vf/adapter.h  540
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c  2906
-rw-r--r--  drivers/net/cxgb4vf/sge.c  2449
-rw-r--r--  drivers/net/cxgb4vf/t4vf_common.h  273
-rw-r--r--  drivers/net/cxgb4vf/t4vf_defs.h  121
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c  1333
-rw-r--r--  drivers/net/e1000e/82571.c  2
-rw-r--r--  drivers/net/e1000e/defines.h  4
-rw-r--r--  drivers/net/e1000e/e1000.h  9
-rw-r--r--  drivers/net/e1000e/es2lan.c  2
-rw-r--r--  drivers/net/e1000e/ethtool.c  144
-rw-r--r--  drivers/net/e1000e/hw.h  15
-rw-r--r--  drivers/net/e1000e/ich8lan.c  456
-rw-r--r--  drivers/net/e1000e/lib.c  2
-rw-r--r--  drivers/net/e1000e/netdev.c  306
-rw-r--r--  drivers/net/e1000e/param.c  2
-rw-r--r--  drivers/net/e1000e/phy.c  5
-rw-r--r--  drivers/net/ehea/ehea.h  2
-rw-r--r--  drivers/net/ehea/ehea_main.c  11
-rw-r--r--  drivers/net/enic/cq_desc.h  2
-rw-r--r--  drivers/net/enic/cq_enet_desc.h  20
-rw-r--r--  drivers/net/enic/enic.h  21
-rw-r--r--  drivers/net/enic/enic_main.c  517
-rw-r--r--  drivers/net/enic/enic_res.c  53
-rw-r--r--  drivers/net/enic/enic_res.h  33
-rw-r--r--  drivers/net/enic/rq_enet_desc.h  2
-rw-r--r--  drivers/net/enic/vnic_cq.c  4
-rw-r--r--  drivers/net/enic/vnic_cq.h  2
-rw-r--r--  drivers/net/enic/vnic_dev.c  272
-rw-r--r--  drivers/net/enic/vnic_dev.h  21
-rw-r--r--  drivers/net/enic/vnic_devcmd.h  35
-rw-r--r--  drivers/net/enic/vnic_enet.h  4
-rw-r--r--  drivers/net/enic/vnic_intr.c  5
-rw-r--r--  drivers/net/enic/vnic_intr.h  8
-rw-r--r--  drivers/net/enic/vnic_nic.h  2
-rw-r--r--  drivers/net/enic/vnic_resource.h  2
-rw-r--r--  drivers/net/enic/vnic_rq.c  40
-rw-r--r--  drivers/net/enic/vnic_rq.h  16
-rw-r--r--  drivers/net/enic/vnic_rss.h  2
-rw-r--r--  drivers/net/enic/vnic_stats.h  2
-rw-r--r--  drivers/net/enic/vnic_vic.c  3
-rw-r--r--  drivers/net/enic/vnic_wq.c  25
-rw-r--r--  drivers/net/enic/vnic_wq.h  16
-rw-r--r--  drivers/net/enic/wq_enet_desc.h  2
-rw-r--r--  drivers/net/fec.c  30
-rw-r--r--  drivers/net/gianfar.c  113
-rw-r--r--  drivers/net/gianfar.h  13
-rw-r--r--  drivers/net/igb/e1000_82575.c  8
-rw-r--r--  drivers/net/igb/e1000_defines.h  2
-rw-r--r--  drivers/net/igb/igb_ethtool.c  8
-rw-r--r--  drivers/net/igb/igb_main.c  17
-rw-r--r--  drivers/net/ixgbe/ixgbe.h  1
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c  13
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  14
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  88
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c  35
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h  1
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c  16
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h  2
-rw-r--r--  drivers/net/ksz884x.c  2
-rw-r--r--  drivers/net/lib82596.c  2
-rw-r--r--  drivers/net/ll_temac_main.c  25
-rw-r--r--  drivers/net/loopback.c  62
-rw-r--r--  drivers/net/macvlan.c  65
-rw-r--r--  drivers/net/mipsnet.c  2
-rw-r--r--  drivers/net/mlx4/en_ethtool.c  38
-rw-r--r--  drivers/net/mv643xx_eth.c  7
-rw-r--r--  drivers/net/myri10ge/myri10ge.c  10
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c  3
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c  13
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c  4
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c  13
-rw-r--r--  drivers/net/niu.c  9
-rw-r--r--  drivers/net/octeon/octeon_mgmt.c  6
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c  1
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c  1
-rw-r--r--  drivers/net/phy/broadcom.c  46
-rw-r--r--  drivers/net/phy/mdio-octeon.c  6
-rw-r--r--  drivers/net/phy/micrel.c  167
-rw-r--r--  drivers/net/qlcnic/qlcnic.h  113
-rw-r--r--  drivers/net/qlcnic/qlcnic_ctx.c  175
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c  22
-rw-r--r--  drivers/net/qlcnic/qlcnic_hdr.h  6
-rw-r--r--  drivers/net/qlcnic/qlcnic_hw.c  48
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c  120
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c  773
-rw-r--r--  drivers/net/s2io.c  50
-rw-r--r--  drivers/net/sfc/efx.c  232
-rw-r--r--  drivers/net/sfc/efx.h  5
-rw-r--r--  drivers/net/sfc/ethtool.c  164
-rw-r--r--  drivers/net/sfc/falcon.c  192
-rw-r--r--  drivers/net/sfc/falcon_boards.c  35
-rw-r--r--  drivers/net/sfc/falcon_xmac.c  5
-rw-r--r--  drivers/net/sfc/io.h  37
-rw-r--r--  drivers/net/sfc/mcdi.c  98
-rw-r--r--  drivers/net/sfc/mcdi_mac.c  8
-rw-r--r--  drivers/net/sfc/mcdi_phy.c  20
-rw-r--r--  drivers/net/sfc/mdio_10g.c  39
-rw-r--r--  drivers/net/sfc/mdio_10g.h  3
-rw-r--r--  drivers/net/sfc/mtd.c  23
-rw-r--r--  drivers/net/sfc/net_driver.h  46
-rw-r--r--  drivers/net/sfc/nic.c  504
-rw-r--r--  drivers/net/sfc/nic.h  5
-rw-r--r--  drivers/net/sfc/qt202x_phy.c  42
-rw-r--r--  drivers/net/sfc/rx.c  80
-rw-r--r--  drivers/net/sfc/selftest.c  126
-rw-r--r--  drivers/net/sfc/siena.c  68
-rw-r--r--  drivers/net/sfc/tenxpress.c  12
-rw-r--r--  drivers/net/sfc/tx.c  41
-rw-r--r--  drivers/net/sky2.c  35
-rw-r--r--  drivers/net/ucc_geth.c  2
-rw-r--r--  drivers/net/usb/hso.c  1
-rw-r--r--  drivers/net/usb/pegasus.c  125
-rw-r--r--  drivers/net/usb/pegasus.h  296
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c  13
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c  108
-rw-r--r--  drivers/net/wireless/at76c50x-usb.h  1
-rw-r--r--  drivers/net/wireless/ath/ath5k/ath5k.h  12
-rw-r--r--  drivers/net/wireless/ath/ath5k/attach.c  1
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c  335
-rw-r--r--  drivers/net/wireless/ath/ath5k/debug.c  5
-rw-r--r--  drivers/net/wireless/ath/ath5k/desc.c  145
-rw-r--r--  drivers/net/wireless/ath/ath5k/desc.h  310
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c  2
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile  3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c  742
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.h  78
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c  361
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_hw.c  9
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.h  7
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h  248
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h  248
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c  13
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.h  4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c  22
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c  8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.h  4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_paprd.c  714
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c  394
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h  232
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h  25
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c  3
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c  12
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.h  3
-rw-r--r--  drivers/net/wireless/ath/ath9k/gpio.c  9
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c  33
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h  26
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_beacon.c  2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c  50
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c  108
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c  39
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw-ops.h  16
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c  263
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h  86
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c  39
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c  14
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h  13
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c  172
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c  1
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c  7
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c  24
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h  28
-rw-r--r--  drivers/net/wireless/ath/ath9k/virtual.c  2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c  133
-rw-r--r--  drivers/net/wireless/b43/main.c  2
-rw-r--r--  drivers/net/wireless/b43/sdio.c  1
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.c  3
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c  15
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c  13
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c  2
-rw-r--r--  drivers/net/wireless/hostap/hostap_wlan.h  2
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c  4
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile  4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c  3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c  7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c  13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c  9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c  160
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-calib.c (renamed from drivers/net/wireless/iwlwifi/iwl-calib.c)  26
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c  72
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c  6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c  11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c  17
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rx.c  284
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tx.c  10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c  89
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.h  10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h  32
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c  22
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h  12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c  11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h  21
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c  3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c  242
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c  10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c  110
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.h  3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c  20
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/cfg80211.c  12
-rw-r--r--  drivers/net/wireless/libertas/Makefile  3
-rw-r--r--  drivers/net/wireless/libertas/assoc.c  2264
-rw-r--r--  drivers/net/wireless/libertas/assoc.h  155
-rw-r--r--  drivers/net/wireless/libertas/cfg.c  2038
-rw-r--r--  drivers/net/wireless/libertas/cfg.h  21
-rw-r--r--  drivers/net/wireless/libertas/cmd.c  87
-rw-r--r--  drivers/net/wireless/libertas/cmdresp.c  83
-rw-r--r--  drivers/net/wireless/libertas/debugfs.c  54
-rw-r--r--  drivers/net/wireless/libertas/decl.h  8
-rw-r--r--  drivers/net/wireless/libertas/dev.h  62
-rw-r--r--  drivers/net/wireless/libertas/ethtool.c  5
-rw-r--r--  drivers/net/wireless/libertas/host.h  28
-rw-r--r--  drivers/net/wireless/libertas/main.c  226
-rw-r--r--  drivers/net/wireless/libertas/mesh.c  6
-rw-r--r--  drivers/net/wireless/libertas/mesh.h  5
-rw-r--r--  drivers/net/wireless/libertas/rx.c  121
-rw-r--r--  drivers/net/wireless/libertas/scan.c  1354
-rw-r--r--  drivers/net/wireless/libertas/scan.h  63
-rw-r--r--  drivers/net/wireless/libertas/tx.c  12
-rw-r--r--  drivers/net/wireless/libertas/wext.c  2353
-rw-r--r--  drivers/net/wireless/libertas/wext.h  17
-rw-r--r--  drivers/net/wireless/libertas_tf/main.c  2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c  5
-rw-r--r--  drivers/net/wireless/p54/p54pci.c  2
-rw-r--r--  drivers/net/wireless/p54/p54usb.c  3
-rw-r--r--  drivers/net/wireless/rndis_wlan.c  20
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c  3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c  3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c  11
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800.h  8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c  160
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c  66
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c  19
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h  6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c  46
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00link.c  8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c  10
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c  74
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.h  18
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c  52
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.h  9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c  51
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.h  10
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c  56
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c  4
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180_dev.c  2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c  3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h  3
-rw-r--r--  drivers/s390/net/qeth_core.h  22
-rw-r--r--  drivers/s390/net/qeth_core_main.c  369
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h  5
-rw-r--r--  drivers/s390/net/qeth_l2_main.c  101
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  252
-rw-r--r--  drivers/s390/net/smsgiucv.c  11
-rw-r--r--  drivers/serial/serial_cs.c  1
-rw-r--r--  drivers/staging/batman-adv/hard-interface.c  2
-rw-r--r--  drivers/usb/gadget/rndis.c  2
-rw-r--r--  firmware/Makefile  2
-rw-r--r--  firmware/cxgb3/t3fw-7.10.0.bin.ihex  1935
-rw-r--r--  firmware/cxgb3/t3fw-7.4.0.bin.ihex  1917
-rw-r--r--  include/linux/brcmphy.h  7
-rw-r--r--  include/linux/caif/caif_socket.h  34
-rw-r--r--  include/linux/eeprom_93cx6.h  1
-rw-r--r--  include/linux/ethtool.h  17
-rw-r--r--  include/linux/filter.h  48
-rw-r--r--  include/linux/if.h  2
-rw-r--r--  include/linux/if_macvlan.h  19
-rw-r--r--  include/linux/in.h  1
-rw-r--r--  include/linux/netdevice.h  15
-rw-r--r--  include/linux/netpoll.h  24
-rw-r--r--  include/linux/nl80211.h  22
-rw-r--r--  include/linux/skbuff.h  8
-rw-r--r--  include/linux/socket.h  5
-rw-r--r--  include/linux/u64_stats_sync.h  140
-rw-r--r--  include/linux/user_namespace.h  14
-rw-r--r--  include/net/af_unix.h  4
-rw-r--r--  include/net/caif/caif_dev.h  8
-rw-r--r--  include/net/caif/caif_layer.h  6
-rw-r--r--  include/net/caif/caif_spi.h  153
-rw-r--r--  include/net/caif/cfcnfg.h  16
-rw-r--r--  include/net/caif/cfsrvl.h  15
-rw-r--r--  include/net/cfg80211.h  17
-rw-r--r--  include/net/inet_frag.h  1
-rw-r--r--  include/net/inet_sock.h  3
-rw-r--r--  include/net/inetpeer.h  30
-rw-r--r--  include/net/ip.h  22
-rw-r--r--  include/net/ipv6.h  12
-rw-r--r--  include/net/mac80211.h  113
-rw-r--r--  include/net/neighbour.h  2
-rw-r--r--  include/net/netlink.h  2
-rw-r--r--  include/net/sch_generic.h  11
-rw-r--r--  include/net/scm.h  30
-rw-r--r--  include/net/snmp.h  77
-rw-r--r--  include/net/sock.h  3
-rw-r--r--  include/net/tcp.h  26
-rw-r--r--  kernel/user_namespace.c  44
-rw-r--r--  net/8021q/vlan.h  13
-rw-r--r--  net/8021q/vlan_core.c  7
-rw-r--r--  net/8021q/vlan_dev.c  46
-rw-r--r--  net/Makefile  4
-rw-r--r--  net/bridge/br_device.c  133
-rw-r--r--  net/bridge/br_fdb.c  10
-rw-r--r--  net/bridge/br_forward.c  38
-rw-r--r--  net/bridge/br_if.c  29
-rw-r--r--  net/bridge/br_input.c  11
-rw-r--r--  net/bridge/br_netfilter.c  15
-rw-r--r--  net/bridge/br_netlink.c  9
-rw-r--r--  net/bridge/br_notify.c  5
-rw-r--r--  net/bridge/br_private.h  61
-rw-r--r--  net/bridge/br_stp_bpdu.c  5
-rw-r--r--  net/bridge/netfilter/ebt_redirect.c  3
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c  8
-rw-r--r--  net/bridge/netfilter/ebtables.c  11
-rw-r--r--  net/caif/Kconfig  7
-rw-r--r--  net/caif/Makefile  14
-rw-r--r--  net/caif/caif_config_util.c  5
-rw-r--r--  net/caif/caif_dev.c  12
-rw-r--r--  net/caif/caif_socket.c  58
-rw-r--r--  net/caif/cfcnfg.c  46
-rw-r--r--  net/caif/cfctrl.c  8
-rw-r--r--  net/caif/cfdbgl.c  2
-rw-r--r--  net/caif/cfdgml.c  7
-rw-r--r--  net/caif/cfpkt_skbuff.c  5
-rw-r--r--  net/caif/cfrfml.c  318
-rw-r--r--  net/caif/cfserl.c  7
-rw-r--r--  net/caif/cfsrvl.c  26
-rw-r--r--  net/caif/cfutill.c  8
-rw-r--r--  net/caif/cfveil.c  7
-rw-r--r--  net/caif/cfvidl.c  2
-rw-r--r--  net/caif/chnl_net.c  67
-rw-r--r--  net/core/dev.c  80
-rw-r--r--  net/core/ethtool.c  108
-rw-r--r--  net/core/filter.c  212
-rw-r--r--  net/core/flow.c  4
-rw-r--r--  net/core/netpoll.c  163
-rw-r--r--  net/core/pktgen.c  142
-rw-r--r--  net/core/scm.c  24
-rw-r--r--  net/core/sock.c  33
-rw-r--r--  net/dccp/ackvec.c  2
-rw-r--r--  net/dccp/ccids/ccid3.c  4
-rw-r--r--  net/dccp/dccp.h  12
-rw-r--r--  net/dccp/input.c  13
-rw-r--r--  net/dccp/options.c  20
-rw-r--r--  net/dccp/proto.c  3
-rw-r--r--  net/ipv4/af_inet.c  65
-rw-r--r--  net/ipv4/arp.c  12
-rw-r--r--  net/ipv4/inetpeer.c  172
-rw-r--r--  net/ipv4/ip_fragment.c  24
-rw-r--r--  net/ipv4/ip_input.c  2
-rw-r--r--  net/ipv4/ip_output.c  9
-rw-r--r--  net/ipv4/ip_sockglue.c  9
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c  5
-rw-r--r--  net/ipv4/proc.c  15
-rw-r--r--  net/ipv4/route.c  1
-rw-r--r--  net/ipv4/syncookies.c  59
-rw-r--r--  net/ipv4/tcp.c  14
-rw-r--r--  net/ipv4/tcp_input.c  2
-rw-r--r--  net/ipv4/tcp_ipv4.c  17
-rw-r--r--  net/ipv4/tcp_output.c  65
-rw-r--r--  net/ipv6/addrconf.c  43
-rw-r--r--  net/ipv6/af_inet6.c  15
-rw-r--r--  net/ipv6/ipv6_sockglue.c  2
-rw-r--r--  net/ipv6/proc.c  17
-rw-r--r--  net/ipv6/reassembly.c  21
-rw-r--r--  net/ipv6/syncookies.c  8
-rw-r--r--  net/ipv6/tcp_ipv6.c  15
-rw-r--r--  net/mac80211/Kconfig  1
-rw-r--r--  net/mac80211/agg-rx.c  123
-rw-r--r--  net/mac80211/agg-tx.c  549
-rw-r--r--  net/mac80211/cfg.c  82
-rw-r--r--  net/mac80211/debugfs_key.c  2
-rw-r--r--  net/mac80211/debugfs_sta.c  17
-rw-r--r--  net/mac80211/driver-ops.h  113
-rw-r--r--  net/mac80211/driver-trace.h  225
-rw-r--r--  net/mac80211/ht.c  50
-rw-r--r--  net/mac80211/ibss.c  97
-rw-r--r--  net/mac80211/ieee80211_i.h  68
-rw-r--r--  net/mac80211/iface.c  174
-rw-r--r--  net/mac80211/key.c  2
-rw-r--r--  net/mac80211/key.h  8
-rw-r--r--  net/mac80211/main.c  75
-rw-r--r--  net/mac80211/mesh.c  73
-rw-r--r--  net/mac80211/mesh.h  2
-rw-r--r--  net/mac80211/mesh_hwmp.c  4
-rw-r--r--  net/mac80211/mesh_pathtbl.c  4
-rw-r--r--  net/mac80211/mesh_plink.c  42
-rw-r--r--  net/mac80211/mlme.c  254
-rw-r--r--  net/mac80211/pm.c  18
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c  5
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.h  2
-rw-r--r--  net/mac80211/rc80211_minstrel_ht_debugfs.c  2
-rw-r--r--  net/mac80211/rx.c  155
-rw-r--r--  net/mac80211/scan.c  6
-rw-r--r--  net/mac80211/sta_info.c  14
-rw-r--r--  net/mac80211/sta_info.h  79
-rw-r--r--  net/mac80211/status.c  4
-rw-r--r--  net/mac80211/tx.c  93
-rw-r--r--  net/mac80211/util.c  31
-rw-r--r--  net/mac80211/work.c  2
-rw-r--r--  net/mac80211/wpa.c  8
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c  4
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c  32
-rw-r--r--  net/netfilter/nfnetlink_log.c  6
-rw-r--r--  net/netfilter/nfnetlink_queue.c  6
-rw-r--r--  net/netfilter/xt_TCPMSS.c  4
-rw-r--r--  net/netlink/af_netlink.c  11
-rw-r--r--  net/sched/act_mirred.c  12
-rw-r--r--  net/sched/act_nat.c  31
-rw-r--r--  net/sched/sch_teql.c  1
-rw-r--r--  net/sctp/protocol.c  3
-rw-r--r--  net/unix/af_unix.c  97
-rw-r--r--  net/wireless/core.c  2
-rw-r--r--  net/wireless/mlme.c  5
-rw-r--r--  net/wireless/nl80211.c  88
-rw-r--r--  net/wireless/reg.c  6
-rw-r--r--  net/wireless/reg.h  2
-rw-r--r--  net/wireless/util.c  4
-rw-r--r--  net/wireless/wext-compat.c  10
-rw-r--r--  net/xfrm/xfrm_policy.c  3
458 files changed, 27930 insertions, 15991 deletions
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
index 80c7285..e4d3267 100644
--- a/Documentation/networking/README.ipw2200
+++ b/Documentation/networking/README.ipw2200
@@ -171,7 +171,7 @@ Where the supported parameter are:
led
Can be used to turn on experimental LED code.
- 0 = Off, 1 = On. Default is 0.
+ 0 = Off, 1 = On. Default is 1.
mode
Can be used to set the default mode of the adapter.
diff --git a/Documentation/networking/caif/spi_porting.txt b/Documentation/networking/caif/spi_porting.txt
new file mode 100644
index 0000000..61d7c92
--- /dev/null
+++ b/Documentation/networking/caif/spi_porting.txt
@@ -0,0 +1,208 @@
+- CAIF SPI porting -
+
+- CAIF SPI basics:
+
+Running CAIF over SPI needs some extra setup, owing to the nature of SPI.
+Two extra GPIOs have been added in order to negotiate the transfers
+ between the master and the slave. The minimum requirement for running
+CAIF over SPI is a SPI slave chip and two GPIOs (more details below).
+Please note that running as a slave implies that you need to keep up
+with the master clock. An overrun or underrun event is fatal.
+
+- CAIF SPI framework:
+
+To make porting as easy as possible, the CAIF SPI has been divided into
+two parts. The first part (called the interface part) deals with all
+generic functionality such as length framing, SPI frame negotiation
+and SPI frame delivery and transmission. The other part is the CAIF
+SPI slave device part, which is the module that you have to write if
+you want to run SPI CAIF on new hardware. This part takes care of
+the physical hardware, both with regard to SPI and to GPIOs.
+
+- Implementing a CAIF SPI device:
+
+ - Functionality provided by the CAIF SPI slave device:
+
+ In order to implement a SPI device you will, as a minimum,
+ need to implement the following
+ functions:
+
+ int (*init_xfer) (struct cfspi_xfer * xfer, struct cfspi_dev *dev):
+
+ This function is called by the CAIF SPI interface to give
+ you a chance to set up your hardware to be ready to receive
+ a stream of data from the master. The xfer structure contains
+ both physical and logical addresses, as well as the total length
+ of the transfer in both directions. The dev parameter can be used
+ to map to different CAIF SPI slave devices.
+
+ void (*sig_xfer) (bool xfer, struct cfspi_dev *dev):
+
+ This function is called by the CAIF SPI interface when the output
+ (SPI_INT) GPIO needs to change state. The boolean value of the xfer
+ variable indicates whether the GPIO should be asserted (HIGH) or
+ deasserted (LOW). The dev parameter can be used to map to different CAIF
+ SPI slave devices.
+
+ - Functionality provided by the CAIF SPI interface:
+
+ void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
+
+ This function is called by the CAIF SPI slave device in order to
+ signal a change of state of the input GPIO (SS) to the interface.
+ Only active edges need to be reported.
+ This function can be called from IRQ context (recommended in order
+ not to introduce latency). The ifc parameter should be the pointer
+ returned from the platform probe function in the SPI device structure.
+
+ void (*xfer_done_cb) (struct cfspi_ifc *ifc);
+
+ This function is called by the CAIF SPI slave device in order to
+ report that a transfer is completed. This function should only be
+ called once both the transmission and the reception are completed.
+ This function can be called from IRQ context (recommended in order
+ not to introduce latency). The ifc parameter should be the pointer
+ returned from the platform probe function in the SPI device structure.
+
+ - Connecting the bits and pieces:
+
+ - Filling in the SPI slave device structure:
+
+ Connect the necessary callback functions.
+ Indicate clock speed (used to calculate toggle delays).
+ Choose a suitable name (helps debugging if you use several CAIF
+ SPI slave devices).
+ Assign your private data (can be used to map to your structure).
+
+ - Filling in the SPI slave platform device structure:
+ Add name of driver to connect to ("cfspi_sspi").
+ Assign the SPI slave device structure as platform data.
+
+- Padding:
+
+In order to optimize throughput, a number of SPI padding options are provided.
+Padding can be enabled independently for uplink and downlink transfers.
+Padding can be enabled for the head, the tail and for the total frame size.
+The padding needs to be correctly configured on both sides of the link.
+The padding can be changed via module parameters in cfspi_sspi.c or via
+the sysfs directory of the cfspi_sspi driver (before device registration).
+
+- CAIF SPI device template:
+
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL), version 2.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <net/caif/caif_spi.h>
+
+MODULE_LICENSE("GPL");
+
+struct sspi_struct {
+ struct cfspi_dev sdev;
+ struct cfspi_xfer *xfer;
+};
+
+static struct sspi_struct slave;
+static struct platform_device slave_device;
+
+static irqreturn_t sspi_irq(int irq, void *arg)
+{
+ /* You only need to trigger on an edge to the active state of the
+ * SS signal. Once a edge is detected, the ss_cb() function should be
+ * called with the parameter assert set to true. It is OK
+ * (and even advised) to call the ss_cb() function in IRQ context in
+ * order not to add any delay. */
+
+ return IRQ_HANDLED;
+}
+
+static void sspi_complete(void *context)
+{
+ /* Normally the DMA or the SPI framework will call you back
+ * in something similar to this. The only thing you need to
+ * do is to call the xfer_done_cb() function, providing the pointer
+ * to the CAIF SPI interface. It is OK to call this function
+ * from IRQ context. */
+}
+
+static int sspi_init_xfer(struct cfspi_xfer *xfer, struct cfspi_dev *dev)
+{
+ /* Store transfer info. For a normal implementation you should
+ * set up your DMA here and make sure that you are ready to
+ * receive the data from the master SPI. */
+
+ struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
+
+ sspi->xfer = xfer;
+
+ return 0;
+}
+
+void sspi_sig_xfer(bool xfer, struct cfspi_dev *dev)
+{
+ /* If xfer is true then you should assert the SPI_INT to indicate to
+ * the master that you are ready to receive the data from the master
+ * SPI. If xfer is false then you should de-assert SPI_INT to indicate
+ * that the transfer is done.
+ */
+
+ struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
+}
+
+static void sspi_release(struct device *dev)
+{
+ /*
+ * Here you should release your SPI device resources.
+ */
+}
+
+static int __init sspi_init(void)
+{
+ /* Here you should initialize your SPI device by providing the
+ * necessary functions, clock speed, name and private data. Once
+ * done, you can register your device with the
+ * platform_device_register() function. This function will return
+ * with the CAIF SPI interface initialized. This is probably also
+ * the place where you should set up your GPIOs, interrupts and SPI
+ * resources. */
+
+ int res = 0;
+
+ /* Initialize slave device. */
+ slave.sdev.init_xfer = sspi_init_xfer;
+ slave.sdev.sig_xfer = sspi_sig_xfer;
+ slave.sdev.clk_mhz = 13;
+ slave.sdev.priv = &slave;
+ slave.sdev.name = "spi_sspi";
+ slave_device.dev.release = sspi_release;
+
+ /* Initialize platform device. */
+ slave_device.name = "cfspi_sspi";
+ slave_device.dev.platform_data = &slave.sdev;
+
+ /* Register platform device. */
+ res = platform_device_register(&slave_device);
+ if (res) {
+ printk(KERN_WARNING "sspi_init: failed to register dev.\n");
+ return -ENODEV;
+ }
+
+ return res;
+}
+
+static void __exit sspi_exit(void)
+{
+ platform_device_del(&slave_device);
+}
+
+module_init(sspi_init);
+module_exit(sspi_exit);
diff --git a/MAINTAINERS b/MAINTAINERS
index 0924a76..2ebb567 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2978,20 +2978,14 @@ F: drivers/net/ixgb/
F: drivers/net/ixgbe/
INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
-M: Reinette Chatre <reinette.chatre@intel.com>
-M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
-W: http://ipw2100.sourceforge.net
-S: Odd Fixes
+S: Orphan
F: Documentation/networking/README.ipw2100
F: drivers/net/wireless/ipw2x00/ipw2100.*
INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT
-M: Reinette Chatre <reinette.chatre@intel.com>
-M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
-W: http://ipw2200.sourceforge.net
-S: Odd Fixes
+S: Orphan
F: Documentation/networking/README.ipw2200
F: drivers/net/wireless/ipw2x00/ipw2200.*
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 48c4f03..81e1f7d 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -101,10 +101,7 @@ extern struct dentry *of_debugfs_root;
* MicroBlaze doesn't handle unaligned accesses in hardware.
*
* Based on this we force the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
*/
#define NET_IP_ALIGN 2
-#define NET_SKB_PAD L1_CACHE_BYTES
#endif /* _ASM_MICROBLAZE_SYSTEM_H */
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index a6297c6..6c294ac 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -515,11 +515,8 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
* powers of 2 writes until it reaches sufficient alignment).
*
* Based on this we disable the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
*/
#define NET_IP_ALIGN 0
-#define NET_SKB_PAD L1_CACHE_BYTES
#define cmpxchg64(ptr, o, n) \
({ \
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index b8fe48e..1db9bd2 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -457,4 +457,11 @@ static inline void rdtsc_barrier(void)
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN 0
#endif /* _ASM_X86_SYSTEM_H */
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index d75803e..069a03f 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -435,7 +435,6 @@ MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
First the windows. There are eight register windows, with the command
and status registers available in each.
*/
-#define EL3WINDOW(win_num) iowrite16(SelectWindow + (win_num), ioaddr + EL3_CMD)
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e
@@ -645,10 +644,51 @@ struct vortex_private {
u16 deferred; /* Resend these interrupts when we
* bale from the ISR */
u16 io_size; /* Size of PCI region (for release_region) */
- spinlock_t lock; /* Serialise access to device & its vortex_private */
- struct mii_if_info mii; /* MII lib hooks/info */
+
+ /* Serialises access to hardware other than MII and variables below.
+ * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+ spinlock_t lock;
+
+ spinlock_t mii_lock; /* Serialises access to MII */
+ struct mii_if_info mii; /* MII lib hooks/info */
+ spinlock_t window_lock; /* Serialises access to windowed regs */
+ int window; /* Register window */
};
+static void window_set(struct vortex_private *vp, int window)
+{
+ if (window != vp->window) {
+ iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
+ vp->window = window;
+ }
+}
+
+#define DEFINE_WINDOW_IO(size) \
+static u ## size \
+window_read ## size(struct vortex_private *vp, int window, int addr) \
+{ \
+ unsigned long flags; \
+ u ## size ret; \
+ spin_lock_irqsave(&vp->window_lock, flags); \
+ window_set(vp, window); \
+ ret = ioread ## size(vp->ioaddr + addr); \
+ spin_unlock_irqrestore(&vp->window_lock, flags); \
+ return ret; \
+} \
+static void \
+window_write ## size(struct vortex_private *vp, u ## size value, \
+ int window, int addr) \
+{ \
+ unsigned long flags; \
+ spin_lock_irqsave(&vp->window_lock, flags); \
+ window_set(vp, window); \
+ iowrite ## size(value, vp->ioaddr + addr); \
+ spin_unlock_irqrestore(&vp->window_lock, flags); \
+}
+DEFINE_WINDOW_IO(8)
+DEFINE_WINDOW_IO(16)
+DEFINE_WINDOW_IO(32)
+
#ifdef CONFIG_PCI
#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
#else
@@ -711,7 +751,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
static int vortex_up(struct net_device *dev);
static void vortex_down(struct net_device *dev, int final);
static int vortex_open(struct net_device *dev);
-static void mdio_sync(void __iomem *ioaddr, int bits);
+static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(unsigned long arg);
@@ -1119,6 +1159,7 @@ static int __devinit vortex_probe1(struct device *gendev,
vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
vp->io_size = vci->io_size;
vp->card_idx = card_idx;
+ vp->window = -1;
/* module list only for Compaq device */
if (gendev == NULL) {
@@ -1154,6 +1195,8 @@ static int __devinit vortex_probe1(struct device *gendev,
}
spin_lock_init(&vp->lock);
+ spin_lock_init(&vp->mii_lock);
+ spin_lock_init(&vp->window_lock);
vp->gendev = gendev;
vp->mii.dev = dev;
vp->mii.mdio_read = mdio_read;
@@ -1205,7 +1248,6 @@ static int __devinit vortex_probe1(struct device *gendev,
vp->mii.force_media = vp->full_duplex;
vp->options = option;
/* Read the station address from the EEPROM. */
- EL3WINDOW(0);
{
int base;
@@ -1218,14 +1260,15 @@ static int __devinit vortex_probe1(struct device *gendev,
for (i = 0; i < 0x40; i++) {
int timer;
- iowrite16(base + i, ioaddr + Wn0EepromCmd);
+ window_write16(vp, base + i, 0, Wn0EepromCmd);
/* Pause for at least 162 us. for the read to take place. */
for (timer = 10; timer >= 0; timer--) {
udelay(162);
- if ((ioread16(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ if ((window_read16(vp, 0, Wn0EepromCmd) &
+ 0x8000) == 0)
break;
}
- eeprom[i] = ioread16(ioaddr + Wn0EepromData);
+ eeprom[i] = window_read16(vp, 0, Wn0EepromData);
}
}
for (i = 0; i < 0x18; i++)
@@ -1250,9 +1293,8 @@ static int __devinit vortex_probe1(struct device *gendev,
pr_err("*** EEPROM MAC address is invalid.\n");
goto free_ring; /* With every pack */
}
- EL3WINDOW(2);
for (i = 0; i < 6; i++)
- iowrite8(dev->dev_addr[i], ioaddr + i);
+ window_write8(vp, dev->dev_addr[i], 2, i);
if (print_info)
pr_cont(", IRQ %d\n", dev->irq);
@@ -1261,8 +1303,7 @@ static int __devinit vortex_probe1(struct device *gendev,
pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
dev->irq);
- EL3WINDOW(4);
- step = (ioread8(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+ step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
if (print_info) {
pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
@@ -1285,17 +1326,15 @@ static int __devinit vortex_probe1(struct device *gendev,
(unsigned long long)pci_resource_start(pdev, 2),
vp->cb_fn_base);
}
- EL3WINDOW(2);
- n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010;
+ n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
if (vp->drv_flags & INVERT_LED_PWR)
n |= 0x10;
if (vp->drv_flags & INVERT_MII_PWR)
n |= 0x4000;
- iowrite16(n, ioaddr + Wn2_ResetOptions);
+ window_write16(vp, n, 2, Wn2_ResetOptions);
if (vp->drv_flags & WNO_XCVR_PWR) {
- EL3WINDOW(0);
- iowrite16(0x0800, ioaddr);
+ window_write16(vp, 0x0800, 0, 0);
}
}
@@ -1313,14 +1352,13 @@ static int __devinit vortex_probe1(struct device *gendev,
{
static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
unsigned int config;
- EL3WINDOW(3);
- vp->available_media = ioread16(ioaddr + Wn3_Options);
+ vp->available_media = window_read16(vp, 3, Wn3_Options);
if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
vp->available_media = 0x40;
- config = ioread32(ioaddr + Wn3_Config);
+ config = window_read32(vp, 3, Wn3_Config);
if (print_info) {
pr_debug(" Internal config register is %4.4x, transceivers %#x.\n",
- config, ioread16(ioaddr + Wn3_Options));
+ config, window_read16(vp, 3, Wn3_Options));
pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
8 << RAM_SIZE(config),
RAM_WIDTH(config) ? "word" : "byte",
@@ -1346,7 +1384,6 @@ static int __devinit vortex_probe1(struct device *gendev,
if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
int phy, phy_idx = 0;
- EL3WINDOW(4);
mii_preamble_required++;
if (vp->drv_flags & EXTRA_PREAMBLE)
mii_preamble_required++;
@@ -1478,18 +1515,17 @@ static void
vortex_set_duplex(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
pr_info("%s: setting %s-duplex.\n",
dev->name, (vp->full_duplex) ? "full" : "half");
- EL3WINDOW(3);
/* Set the full-duplex bit. */
- iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
- (vp->large_frames ? 0x40 : 0) |
- ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
- 0x100 : 0),
- ioaddr + Wn3_MAC_Ctrl);
+ window_write16(vp,
+ ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+ (vp->large_frames ? 0x40 : 0) |
+ ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
+ 0x100 : 0),
+ 3, Wn3_MAC_Ctrl);
}
static void vortex_check_media(struct net_device *dev, unsigned int init)
@@ -1529,8 +1565,7 @@ vortex_up(struct net_device *dev)
}
/* Before initializing select the active media port. */
- EL3WINDOW(3);
- config = ioread32(ioaddr + Wn3_Config);
+ config = window_read32(vp, 3, Wn3_Config);
if (vp->media_override != 7) {
pr_info("%s: Media override to transceiver %d (%s).\n",
@@ -1577,10 +1612,9 @@ vortex_up(struct net_device *dev)
config = BFINS(config, dev->if_port, 20, 4);
if (vortex_debug > 6)
pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
- iowrite32(config, ioaddr + Wn3_Config);
+ window_write32(vp, config, 3, Wn3_Config);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
- EL3WINDOW(4);
mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
@@ -1601,51 +1635,46 @@ vortex_up(struct net_device *dev)
iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
if (vortex_debug > 1) {
- EL3WINDOW(4);
pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
- dev->name, dev->irq, ioread16(ioaddr + Wn4_Media));
+ dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
}
/* Set the station address and mask in window 2 each time opened. */
- EL3WINDOW(2);
for (i = 0; i < 6; i++)
- iowrite8(dev->dev_addr[i], ioaddr + i);
+ window_write8(vp, dev->dev_addr[i], 2, i);
for (; i < 12; i+=2)
- iowrite16(0, ioaddr + i);
+ window_write16(vp, 0, 2, i);
if (vp->cb_fn_base) {
- unsigned short n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010;
+ unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
if (vp->drv_flags & INVERT_LED_PWR)
n |= 0x10;
if (vp->drv_flags & INVERT_MII_PWR)
n |= 0x4000;
- iowrite16(n, ioaddr + Wn2_ResetOptions);
+ window_write16(vp, n, 2, Wn2_ResetOptions);
}
if (dev->if_port == XCVR_10base2)
/* Start the thinnet transceiver. We should really wait 50ms...*/
iowrite16(StartCoax, ioaddr + EL3_CMD);
if (dev->if_port != XCVR_NWAY) {
- EL3WINDOW(4);
- iowrite16((ioread16(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
- media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+ window_write16(vp,
+ (window_read16(vp, 4, Wn4_Media) &
+ ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits,
+ 4, Wn4_Media);
}
/* Switch to the stats window, and clear all stats by reading. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
- EL3WINDOW(6);
for (i = 0; i < 10; i++)
- ioread8(ioaddr + i);
- ioread16(ioaddr + 10);
- ioread16(ioaddr + 12);
+ window_read8(vp, 6, i);
+ window_read16(vp, 6, 10);
+ window_read16(vp, 6, 12);
/* New: On the Vortex we must also clear the BadSSD counter. */
- EL3WINDOW(4);
- ioread8(ioaddr + 12);
+ window_read8(vp, 4, 12);
/* ..and on the Boomerang we enable the extra statistics bits. */
- iowrite16(0x0040, ioaddr + Wn4_NetDiag);
-
- /* Switch to register set 7 for normal use. */
- EL3WINDOW(7);
+ window_write16(vp, 0x0040, 4, Wn4_NetDiag);
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
vp->cur_rx = vp->dirty_rx = 0;
@@ -1763,7 +1792,7 @@ vortex_timer(unsigned long data)
void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
int ok = 0;
- int media_status, old_window;
+ int media_status;
if (vortex_debug > 2) {
pr_debug("%s: Media selection timer tick happened, %s.\n",
@@ -1771,10 +1800,7 @@ vortex_timer(unsigned long data)
pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
}
- disable_irq_lockdep(dev->irq);
- old_window = ioread16(ioaddr + EL3_CMD) >> 13;
- EL3WINDOW(4);
- media_status = ioread16(ioaddr + Wn4_Media);
+ media_status = window_read16(vp, 4, Wn4_Media);
switch (dev->if_port) {
case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
if (media_status & Media_LnkBeat) {
@@ -1794,10 +1820,7 @@ vortex_timer(unsigned long data)
case XCVR_MII: case XCVR_NWAY:
{
ok = 1;
- /* Interrupts are already disabled */
- spin_lock(&vp->lock);
vortex_check_media(dev, 0);
- spin_unlock(&vp->lock);
}
break;
default: /* Other media types handled by Tx timeouts. */
@@ -1816,6 +1839,8 @@ vortex_timer(unsigned long data)
if (!ok) {
unsigned int config;
+ spin_lock_irq(&vp->lock);
+
do {
dev->if_port = media_tbl[dev->if_port].next;
} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
@@ -1830,19 +1855,22 @@ vortex_timer(unsigned long data)
dev->name, media_tbl[dev->if_port].name);
next_tick = media_tbl[dev->if_port].wait;
}
- iowrite16((media_status & ~(Media_10TP|Media_SQE)) |
- media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+ window_write16(vp,
+ (media_status & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits,
+ 4, Wn4_Media);
- EL3WINDOW(3);
- config = ioread32(ioaddr + Wn3_Config);
+ config = window_read32(vp, 3, Wn3_Config);
config = BFINS(config, dev->if_port, 20, 4);
- iowrite32(config, ioaddr + Wn3_Config);
+ window_write32(vp, config, 3, Wn3_Config);
iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
ioaddr + EL3_CMD);
if (vortex_debug > 1)
pr_debug("wrote 0x%08x to Wn3_Config\n", config);
/* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
+
+ spin_unlock_irq(&vp->lock);
}
leave_media_alone:
@@ -1850,8 +1878,6 @@ leave_media_alone:
pr_debug("%s: Media selection timer finished, %s.\n",
dev->name, media_tbl[dev->if_port].name);
- EL3WINDOW(old_window);
- enable_irq_lockdep(dev->irq);
mod_timer(&vp->timer, RUN_AT(next_tick));
if (vp->deferred)
iowrite16(FakeIntr, ioaddr + EL3_CMD);
@@ -1865,12 +1891,11 @@ static void vortex_tx_timeout(struct net_device *dev)
pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
dev->name, ioread8(ioaddr + TxStatus),
ioread16(ioaddr + EL3_STATUS));
- EL3WINDOW(4);
pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n",
- ioread16(ioaddr + Wn4_NetDiag),
- ioread16(ioaddr + Wn4_Media),
+ window_read16(vp, 4, Wn4_NetDiag),
+ window_read16(vp, 4, Wn4_Media),
ioread32(ioaddr + PktStatus),
- ioread16(ioaddr + Wn4_FIFODiag));
+ window_read16(vp, 4, Wn4_FIFODiag));
/* Slight code bloat to be user friendly. */
if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
pr_err("%s: Transmitter encountered 16 collisions --"
@@ -1917,9 +1942,6 @@ static void vortex_tx_timeout(struct net_device *dev)
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
dev->trans_start = jiffies; /* prevent tx timeout */
-
- /* Switch to register set 7 for normal use. */
- EL3WINDOW(7);
}
/*
@@ -1980,10 +2002,10 @@ vortex_error(struct net_device *dev, int status)
ioread16(ioaddr + EL3_STATUS) & StatsFull) {
pr_warning("%s: Updating statistics failed, disabling "
"stats as an interrupt source.\n", dev->name);
- EL3WINDOW(5);
- iowrite16(SetIntrEnb | (ioread16(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+ iowrite16(SetIntrEnb |
+ (window_read16(vp, 5, 10) & ~StatsFull),
+ ioaddr + EL3_CMD);
vp->intr_enable &= ~StatsFull;
- EL3WINDOW(7);
DoneDidThat++;
}
}
@@ -1993,8 +2015,7 @@ vortex_error(struct net_device *dev, int status)
}
if (status & HostError) {
u16 fifo_diag;
- EL3WINDOW(4);
- fifo_diag = ioread16(ioaddr + Wn4_FIFODiag);
+ fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
dev->name, fifo_diag);
/* Adapter failure requires Tx/Rx reset and reinit. */
@@ -2043,9 +2064,13 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
int len = (skb->len + 3) & ~3;
- iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
- ioaddr + Wn7_MasterAddr);
+ vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
+ PCI_DMA_TODEVICE);
+ spin_lock_irq(&vp->window_lock);
+ window_set(vp, 7);
+ iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
iowrite16(len, ioaddr + Wn7_MasterLen);
+ spin_unlock_irq(&vp->window_lock);
vp->tx_skb = skb;
iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
@@ -2217,6 +2242,9 @@ vortex_interrupt(int irq, void *dev_id)
pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
+ spin_lock(&vp->window_lock);
+ window_set(vp, 7);
+
do {
if (vortex_debug > 5)
pr_debug("%s: In interrupt loop, status %4.4x.\n",
@@ -2275,6 +2303,8 @@ vortex_interrupt(int irq, void *dev_id)
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+ spin_unlock(&vp->window_lock);
+
if (vortex_debug > 4)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
@@ -2760,85 +2790,58 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev)
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- int old_window = ioread16(ioaddr + EL3_CMD);
- if (old_window == 0xffff) /* Chip suspended or ejected. */
- return;
/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
/* Switch to the stats window, and read everything. */
- EL3WINDOW(6);
- dev->stats.tx_carrier_errors += ioread8(ioaddr + 0);
- dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
- dev->stats.tx_window_errors += ioread8(ioaddr + 4);
- dev->stats.rx_fifo_errors += ioread8(ioaddr + 5);
- dev->stats.tx_packets += ioread8(ioaddr + 6);
- dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
- /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */
+ dev->stats.tx_carrier_errors += window_read8(vp, 6, 0);
+ dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1);
+ dev->stats.tx_window_errors += window_read8(vp, 6, 4);
+ dev->stats.rx_fifo_errors += window_read8(vp, 6, 5);
+ dev->stats.tx_packets += window_read8(vp, 6, 6);
+ dev->stats.tx_packets += (window_read8(vp, 6, 9) &
+ 0x30) << 4;
+ /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */
/* Don't bother with register 9, an extension of registers 6&7.
If we do use the 6&7 values the atomic update assumption above
is invalid. */
- dev->stats.rx_bytes += ioread16(ioaddr + 10);
- dev->stats.tx_bytes += ioread16(ioaddr + 12);
+ dev->stats.rx_bytes += window_read16(vp, 6, 10);
+ dev->stats.tx_bytes += window_read16(vp, 6, 12);
/* Extra stats for get_ethtool_stats() */
- vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
- vp->xstats.tx_single_collisions += ioread8(ioaddr + 3);
- vp->xstats.tx_deferred += ioread8(ioaddr + 8);
- EL3WINDOW(4);
- vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
+ vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
+ vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
+ vp->xstats.tx_deferred += window_read8(vp, 6, 8);
+ vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
dev->stats.collisions = vp->xstats.tx_multiple_collisions
+ vp->xstats.tx_single_collisions
+ vp->xstats.tx_max_collisions;
{
- u8 up = ioread8(ioaddr + 13);
+ u8 up = window_read8(vp, 4, 13);
dev->stats.rx_bytes += (up & 0x0f) << 16;
dev->stats.tx_bytes += (up & 0xf0) << 12;
}
-
- EL3WINDOW(old_window >> 13);
}
static int vortex_nway_reset(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
- unsigned long flags;
- int rc;
- spin_lock_irqsave(&vp->lock, flags);
- EL3WINDOW(4);
- rc = mii_nway_restart(&vp->mii);
- spin_unlock_irqrestore(&vp->lock, flags);
- return rc;
+ return mii_nway_restart(&vp->mii);
}
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
- unsigned long flags;
- int rc;
- spin_lock_irqsave(&vp->lock, flags);
- EL3WINDOW(4);
- rc = mii_ethtool_gset(&vp->mii, cmd);
- spin_unlock_irqrestore(&vp->lock, flags);
- return rc;
+ return mii_ethtool_gset(&vp->mii, cmd);
}
static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
- unsigned long flags;
- int rc;
- spin_lock_irqsave(&vp->lock, flags);
- EL3WINDOW(4);
- rc = mii_ethtool_sset(&vp->mii, cmd);
- spin_unlock_irqrestore(&vp->lock, flags);
- return rc;
+ return mii_ethtool_sset(&vp->mii, cmd);
}
static u32 vortex_get_msglevel(struct net_device *dev)
@@ -2930,7 +2933,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int err;
struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
pci_power_t state = 0;
@@ -2942,7 +2944,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
spin_lock_irqsave(&vp->lock, flags);
- EL3WINDOW(4);
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
spin_unlock_irqrestore(&vp->lock, flags);
if(state != 0)
@@ -2985,8 +2986,6 @@ static void set_rx_mode(struct net_device *dev)
static void set_8021q_mode(struct net_device *dev, int enable)
{
struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
- int old_window = ioread16(ioaddr + EL3_CMD);
int mac_ctrl;
if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
@@ -2997,28 +2996,23 @@ static void set_8021q_mode(struct net_device *dev, int enable)
if (enable)
max_pkt_size += 4; /* 802.1Q VLAN tag */
- EL3WINDOW(3);
- iowrite16(max_pkt_size, ioaddr+Wn3_MaxPktSize);
+ window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);
/* set VlanEtherType to let the hardware checksumming
treat tagged frames correctly */
- EL3WINDOW(7);
- iowrite16(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType);
+ window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
} else {
/* on older cards we have to enable large frames */
vp->large_frames = dev->mtu > 1500 || enable;
- EL3WINDOW(3);
- mac_ctrl = ioread16(ioaddr+Wn3_MAC_Ctrl);
+ mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
if (vp->large_frames)
mac_ctrl |= 0x40;
else
mac_ctrl &= ~0x40;
- iowrite16(mac_ctrl, ioaddr+Wn3_MAC_Ctrl);
+ window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
}
-
- EL3WINDOW(old_window);
}
#else
@@ -3037,7 +3031,10 @@ static void set_8021q_mode(struct net_device *dev, int enable)
/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
met by back-to-back PCI I/O cycles, but we insert a delay to avoid
"overclocking" issues. */
-#define mdio_delay() ioread32(mdio_addr)
+static void mdio_delay(struct vortex_private *vp)
+{
+ window_read32(vp, 4, Wn4_PhysicalMgmt);
+}
#define MDIO_SHIFT_CLK 0x01
#define MDIO_DIR_WRITE 0x04
@@ -3048,16 +3045,15 @@ static void set_8021q_mode(struct net_device *dev, int enable)
/* Generate the preamble required for initial synchronization and
a few older transceivers. */
-static void mdio_sync(void __iomem *ioaddr, int bits)
+static void mdio_sync(struct vortex_private *vp, int bits)
{
- void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
-
/* Establish sync by sending at least 32 logic ones. */
while (-- bits >= 0) {
- iowrite16(MDIO_DATA_WRITE1, mdio_addr);
- mdio_delay();
- iowrite16(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
+ window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
}
}
@@ -3065,59 +3061,70 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
{
int i;
struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
unsigned int retval = 0;
- void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ spin_lock_bh(&vp->mii_lock);
if (mii_preamble_required)
- mdio_sync(ioaddr, 32);
+ mdio_sync(vp, 32);
/* Shift the read command bits out. */
for (i = 14; i >= 0; i--) {
int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- iowrite16(dataval, mdio_addr);
- mdio_delay();
- iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
+ window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ window_write16(vp, dataval | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
}
/* Read the two transition, 16 data, and wire-idle bits. */
for (i = 19; i > 0; i--) {
- iowrite16(MDIO_ENB_IN, mdio_addr);
- mdio_delay();
- retval = (retval << 1) | ((ioread16(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
- iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
+ window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ retval = (retval << 1) |
+ ((window_read16(vp, 4, Wn4_PhysicalMgmt) &
+ MDIO_DATA_READ) ? 1 : 0);
+ window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
}
+
+ spin_unlock_bh(&vp->mii_lock);
+
return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
struct vortex_private *vp = netdev_priv(dev);
- void __iomem *ioaddr = vp->ioaddr;
int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
- void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
int i;
+ spin_lock_bh(&vp->mii_lock);
+
if (mii_preamble_required)
- mdio_sync(ioaddr, 32);
+ mdio_sync(vp, 32);
/* Shift the command bits out. */
for (i = 31; i >= 0; i--) {
int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- iowrite16(dataval, mdio_addr);
- mdio_delay();
- iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
+ window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ window_write16(vp, dataval | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
}
/* Leave the interface idle. */
for (i = 1; i >= 0; i--) {
- iowrite16(MDIO_ENB_IN, mdio_addr);
- mdio_delay();
- iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
+ window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
}
+
+ spin_unlock_bh(&vp->mii_lock);
}
/* ACPI: Advanced Configuration and Power Interface. */
@@ -3131,8 +3138,7 @@ static void acpi_set_WOL(struct net_device *dev)
if (vp->enable_wol) {
/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
- EL3WINDOW(7);
- iowrite16(2, ioaddr + 0x0c);
+ window_write16(vp, 2, 7, 0x0c);
/* The RxFilter must accept the WOL frames. */
iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
iowrite16(RxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fe113d0..60d067a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2602,6 +2602,29 @@ config CHELSIO_T4
To compile this driver as a module choose M here; the module
will be called cxgb4.
+config CHELSIO_T4VF_DEPENDS
+ tristate
+ depends on PCI && INET
+ default y
+
+config CHELSIO_T4VF
+ tristate "Chelsio Communications T4 Virtual Function Ethernet support"
+ depends on CHELSIO_T4VF_DEPENDS
+ help
+ This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+ adapters with PCI-E SR-IOV Virtual Functions.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.htm>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module choose M here; the module
+ will be called cxgb4vf.
+
config EHEA
tristate "eHEA Ethernet support"
depends on IBMEBUS && INET && SPARSEMEM
@@ -2615,7 +2638,6 @@ config EHEA
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
depends on PCI && INET
- select INET_LRO
help
This enables the support for the Cisco VIC Ethernet card.
@@ -2755,6 +2777,7 @@ config MYRI10GE_DCA
config NETXEN_NIC
tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
depends on PCI
+ select FW_LOADER
help
This enables the support for NetXen's Gigabit Ethernet card.
@@ -2820,6 +2843,7 @@ config BNX2X
config QLCNIC
tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
depends on PCI
+ select FW_LOADER
help
This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
devices.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0a0512a..ce55581 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_IP1000) += ipg.o
obj-$(CONFIG_CHELSIO_T1) += chelsio/
obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_CHELSIO_T4) += cxgb4/
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_BONDING) += bonding/
@@ -275,7 +276,7 @@ obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
-obj-y += wireless/
+obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_NET_TULIP) += tulip/
obj-$(CONFIG_HAMRADIO) += hamradio/
obj-$(CONFIG_IRDA) += irda/
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b46be49..1a0d2d0 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -282,6 +282,7 @@ struct be_adapter {
int link_speed;
u8 port_type;
u8 transceiver;
+ u8 autoneg;
u8 generation; /* BladeEngine ASIC generation */
u32 flash_status;
struct completion flash_compl;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index ee1ad96..344e062 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -25,6 +25,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+
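+ /* Order the queue entry writes before ringing the doorbell below. */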
+ wmb();
iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
@@ -1693,3 +1695,38 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_phy_info *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_GET_PHY_DETAILS);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PHY_DETAILS,
+ sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 763dc19..912a058 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -144,6 +144,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
+#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_ETH_ACPI_CONFIG 2
#define OPCODE_ETH_PROMISCUOUS 3
@@ -869,6 +870,30 @@ struct be_cmd_resp_seeprom_read {
u8 seeprom_data[BE_READ_SEEPROM_LEN];
};
+enum {
+ PHY_TYPE_CX4_10GB = 0,
+ PHY_TYPE_XFP_10GB,
+ PHY_TYPE_SFP_1GB,
+ PHY_TYPE_SFP_PLUS_10GB,
+ PHY_TYPE_KR_10GB,
+ PHY_TYPE_KX4_10GB,
+ PHY_TYPE_BASET_10GB,
+ PHY_TYPE_BASET_1GB,
+ PHY_TYPE_DISABLED = 255
+};
+
+struct be_cmd_req_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[24];
+};
+struct be_cmd_resp_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ u16 phy_type;
+ u16 interface_type;
+ u32 misc_params;
+ u32 future_use[4];
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -947,4 +972,6 @@ extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
+extern int be_cmd_get_phy_info(struct be_adapter *adapter,
+ struct be_dma_mem *cmd);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 200e985..c0ade24 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -314,10 +314,13 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct be_adapter *adapter = netdev_priv(netdev);
- u8 mac_speed = 0, connector = 0;
+ struct be_dma_mem phy_cmd;
+ struct be_cmd_resp_get_phy_info *resp;
+ u8 mac_speed = 0;
u16 link_speed = 0;
bool link_up = false;
int status;
+ u16 intf_type;
if (adapter->link_speed < 0) {
status = be_cmd_link_status_query(adapter, &link_up,
@@ -337,40 +340,57 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
}
- status = be_cmd_read_port_type(adapter, adapter->port_num,
- &connector);
+ phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
+ phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
+ &phy_cmd.dma);
+ if (!phy_cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+ status = be_cmd_get_phy_info(adapter, &phy_cmd);
if (!status) {
- switch (connector) {
- case 7:
+ resp = (struct be_cmd_resp_get_phy_info *) phy_cmd.va;
+ intf_type = le16_to_cpu(resp->interface_type);
+
+ switch (intf_type) {
+ case PHY_TYPE_XFP_10GB:
+ case PHY_TYPE_SFP_1GB:
+ case PHY_TYPE_SFP_PLUS_10GB:
ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
- break;
- case 0:
- ecmd->port = PORT_TP;
- ecmd->transceiver = XCVR_EXTERNAL;
break;
default:
ecmd->port = PORT_TP;
- ecmd->transceiver = XCVR_INTERNAL;
break;
}
- } else {
- ecmd->port = PORT_AUI;
+
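+ /* KR/KX4 backplane links report autoneg and an internal
+  * transceiver; other interface types report autoneg disabled
+  * and an external transceiver.
+  */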
+ switch (intf_type) {
+ case PHY_TYPE_KR_10GB:
+ case PHY_TYPE_KX4_10GB:
+ ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ default:
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
}
/* Save for future use */
adapter->link_speed = ecmd->speed;
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
+ adapter->autoneg = ecmd->autoneg;
+ pci_free_consistent(adapter->pdev, phy_cmd.size,
+ phy_cmd.va, phy_cmd.dma);
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
ecmd->transceiver = adapter->transceiver;
+ ecmd->autoneg = adapter->autoneg;
}
ecmd->duplex = DUPLEX_FULL;
- ecmd->autoneg = AUTONEG_DISABLE;
ecmd->phy_address = adapter->port_num;
switch (ecmd->port) {
case PORT_FIBRE:
@@ -384,6 +404,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
break;
}
+ if (ecmd->autoneg) {
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ }
+
return 0;
}
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 063026d..0683967 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -192,7 +192,7 @@ struct amap_eth_hdr_wrb {
u8 event;
u8 crc;
u8 forward;
- u8 ipsec;
+ u8 lso6;
u8 mgmt;
u8 ipcs;
u8 udpcs;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 3225774..b636879 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -89,6 +89,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
u32 val = 0;
val |= qid & DB_RQ_RING_ID_MASK;
val |= posted << DB_RQ_NUM_POSTED_SHIFT;
+
+ wmb();
iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
@@ -97,6 +99,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
u32 val = 0;
val |= qid & DB_TXULP_RING_ID_MASK;
val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
+
+ wmb();
iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
@@ -373,10 +377,12 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
- if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
hdr, skb_shinfo(skb)->gso_size);
+ if (skb_is_gso_v6(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (is_tcp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -971,6 +977,7 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
return NULL;
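+ /* Read the rest of the completion entry only after the valid bit is observed. */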
+ rmb();
be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
queue_tail_inc(&adapter->rx_obj.cq);
@@ -1064,6 +1071,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
return NULL;
+ rmb();
be_dws_le_to_cpu(txcp, sizeof(*txcp));
txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
@@ -1111,6 +1119,7 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
if (!eqe->evt)
return NULL;
+ rmb();
eqe->evt = le32_to_cpu(eqe->evt);
queue_tail_inc(&eq_obj->q);
return eqe;
@@ -2186,7 +2195,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
- NETIF_F_GRO;
+ NETIF_F_GRO | NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 368f333..012613f 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -922,61 +922,73 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
# define bfin_tx_hwtstamp(dev, skb)
#endif
-static void adjust_tx_list(void)
+static inline void _tx_reclaim_skb(void)
+{
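+ /* Free tx descriptors from the list head while the hardware
+  * has completed them (non-zero status word).
+  */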
+ do {
+ tx_list_head->desc_a.config &= ~DMAEN;
+ tx_list_head->status.status_word = 0;
+ if (tx_list_head->skb) {
+ dev_kfree_skb(tx_list_head->skb);
+ tx_list_head->skb = NULL;
+ }
+ tx_list_head = tx_list_head->next;
+
+ } while (tx_list_head->status.status_word != 0);
+}
+
+static void tx_reclaim_skb(struct bfin_mac_local *lp)
{
int timeout_cnt = MAX_TIMEOUT_CNT;
- if (tx_list_head->status.status_word != 0 &&
- current_tx_ptr != tx_list_head) {
- goto adjust_head; /* released something, just return; */
- }
+ if (tx_list_head->status.status_word != 0)
+ _tx_reclaim_skb();
- /*
- * if nothing released, check wait condition
- * current's next can not be the head,
- * otherwise the dma will not stop as we want
- */
- if (current_tx_ptr->next->next == tx_list_head) {
+ if (current_tx_ptr->next == tx_list_head) {
while (tx_list_head->status.status_word == 0) {
+ /* Slow down polling to avoid stopping the queue too often. */
udelay(10);
- if (tx_list_head->status.status_word != 0 ||
- !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
- goto adjust_head;
- }
- if (timeout_cnt-- < 0) {
- printk(KERN_ERR DRV_NAME
- ": wait for adjust tx list head timeout\n");
+ /* reclaim skb if DMA is not running. */
+ if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
+ break;
+ if (timeout_cnt-- < 0)
break;
- }
- }
- if (tx_list_head->status.status_word != 0) {
- goto adjust_head;
}
+
+ if (timeout_cnt >= 0)
+ _tx_reclaim_skb();
+ else
+ netif_stop_queue(lp->ndev);
}
- return;
+ if (current_tx_ptr->next != tx_list_head &&
+ netif_queue_stopped(lp->ndev))
+ netif_wake_queue(lp->ndev);
+
+ if (tx_list_head != current_tx_ptr) {
+ /* shorten the timer interval if tx queue is stopped */
+ if (netif_queue_stopped(lp->ndev))
+ lp->tx_reclaim_timer.expires =
+ jiffies + (TX_RECLAIM_JIFFIES >> 4);
+ else
+ lp->tx_reclaim_timer.expires =
+ jiffies + TX_RECLAIM_JIFFIES;
+
+ mod_timer(&lp->tx_reclaim_timer,
+ lp->tx_reclaim_timer.expires);
+ }
-adjust_head:
- do {
- tx_list_head->desc_a.config &= ~DMAEN;
- tx_list_head->status.status_word = 0;
- if (tx_list_head->skb) {
- dev_kfree_skb(tx_list_head->skb);
- tx_list_head->skb = NULL;
- } else {
- printk(KERN_ERR DRV_NAME
- ": no sk_buff in a transmitted frame!\n");
- }
- tx_list_head = tx_list_head->next;
- } while (tx_list_head->status.status_word != 0 &&
- current_tx_ptr != tx_list_head);
return;
+}
+static void tx_reclaim_skb_timeout(unsigned long lp)
+{
+ tx_reclaim_skb((struct bfin_mac_local *)lp);
}
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
u16 *data;
u32 data_align = (unsigned long)(skb->data) & 0x3;
union skb_shared_tx *shtx = skb_tx(skb);
@@ -1009,8 +1021,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
skb->len);
current_tx_ptr->desc_a.start_addr =
(u32)current_tx_ptr->packet;
- if (current_tx_ptr->status.status_word != 0)
- current_tx_ptr->status.status_word = 0;
blackfin_dcache_flush_range(
(u32)current_tx_ptr->packet,
(u32)(current_tx_ptr->packet + skb->len + 2));
@@ -1022,6 +1032,9 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
*/
SSYNC();
+ /* always clear status buffer before start tx dma */
+ current_tx_ptr->status.status_word = 0;
+
/* enable this packet's dma */
current_tx_ptr->desc_a.config |= DMAEN;
@@ -1037,13 +1050,14 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
out:
- adjust_tx_list();
-
bfin_tx_hwtstamp(dev, skb);
current_tx_ptr = current_tx_ptr->next;
dev->stats.tx_packets++;
dev->stats.tx_bytes += (skb->len);
+
+ tx_reclaim_skb(lp);
+
return NETDEV_TX_OK;
}
@@ -1167,8 +1181,11 @@ real_rx:
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bfin_mac_poll(struct net_device *dev)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
disable_irq(IRQ_MAC_RX);
bfin_mac_interrupt(IRQ_MAC_RX, dev);
+ tx_reclaim_skb(lp);
enable_irq(IRQ_MAC_RX);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -1232,12 +1249,27 @@ static int bfin_mac_enable(void)
/* Our watchdog timed out. Called by the networking layer */
static void bfin_mac_timeout(struct net_device *dev)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
pr_debug("%s: %s\n", dev->name, __func__);
bfin_mac_disable();
- /* reset tx queue */
- tx_list_tail = tx_list_head->next;
+ del_timer(&lp->tx_reclaim_timer);
+
+ /* reset tx queue and free skb */
+ while (tx_list_head != current_tx_ptr) {
+ tx_list_head->desc_a.config &= ~DMAEN;
+ tx_list_head->status.status_word = 0;
+ if (tx_list_head->skb) {
+ dev_kfree_skb(tx_list_head->skb);
+ tx_list_head->skb = NULL;
+ }
+ tx_list_head = tx_list_head->next;
+ }
+
+ if (netif_queue_stopped(lp->ndev))
+ netif_wake_queue(lp->ndev);
bfin_mac_enable();
@@ -1430,6 +1462,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
platform_set_drvdata(pdev, ndev);
lp = netdev_priv(ndev);
+ lp->ndev = ndev;
/* Grab the MAC address in the MAC */
*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
@@ -1485,6 +1518,10 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
ndev->netdev_ops = &bfin_mac_netdev_ops;
ndev->ethtool_ops = &bfin_mac_ethtool_ops;
+ init_timer(&lp->tx_reclaim_timer);
+ lp->tx_reclaim_timer.data = (unsigned long)lp;
+ lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
+
spin_lock_init(&lp->lock);
/* now, enable interrupts */
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 1ae7b82..04e4050 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -13,9 +13,12 @@
#include <linux/net_tstamp.h>
#include <linux/clocksource.h>
#include <linux/timecompare.h>
+#include <linux/timer.h>
#define BFIN_MAC_CSUM_OFFLOAD
+#define TX_RECLAIM_JIFFIES (HZ / 5)
+
struct dma_descriptor {
struct dma_descriptor *next_dma_desc;
unsigned long start_addr;
@@ -68,6 +71,8 @@ struct bfin_mac_local {
int wol; /* Wake On Lan */
int irq_wake_requested;
+ struct timer_list tx_reclaim_timer;
+ struct net_device *ndev;
/* MII and PHY stuffs */
int old_link; /* used by bf537_adjust_link */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 522de9f..a5dd81f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3086,7 +3086,6 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
- struct pci_dev *pdev = bp->pdev;
hw_cons = bnx2_get_hw_rx_cons(bnapi);
sw_cons = rxr->rx_cons;
@@ -3112,12 +3111,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
skb = rx_buf->skb;
prefetchw(skb);
- if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
- next_rx_buf =
- &rxr->rx_buf_ring[
- RX_RING_IDX(NEXT_RX_BD(sw_cons))];
- prefetch(next_rx_buf->desc);
- }
+ next_rx_buf =
+ &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(next_rx_buf->desc);
+
rx_buf->skb = NULL;
dma_addr = dma_unmap_addr(rx_buf, mapping);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 57ff5b3..29e293f 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -10982,6 +10982,9 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
int changed = 0;
int rc = 0;
+ if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
+ return -EINVAL;
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
printk(KERN_ERR "Handling parity error recovery. Try again later\n");
return -EAGAIN;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 0b28e01..631a624 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -2,16 +2,32 @@
# CAIF physical drivers
#
-if CAIF
-
comment "CAIF transport drivers"
config CAIF_TTY
tristate "CAIF TTY transport driver"
+ depends on CAIF
default n
---help---
The CAIF TTY transport driver is a Line Discipline (ldisc)
identified as N_CAIF. When this ldisc is opened from user space
it will redirect the TTY's traffic into the CAIF stack.
-endif # CAIF
+config CAIF_SPI_SLAVE
+ tristate "CAIF SPI transport driver for slave interface"
+ depends on CAIF
+ default n
+ ---help---
+ The CAIF link-layer SPI protocol driver for the slave SPI interface.
+ This driver implements a platform driver to accommodate a
+ platform-specific SPI device. A sample CAIF SPI platform device is
+ provided in Documentation/networking/caif/spi_porting.txt.
+
+config CAIF_SPI_SYNC
+ bool "Next command and length in start of frame"
+ depends on CAIF_SPI_SLAVE
+ default n
+ ---help---
+ Putting the next command and length at the start of the frame can
+ help to synchronize to the next transfer in case of over- or under-runs.
+ This option also needs to be enabled on the modem.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 52b6d1f..3a11d61 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,12 +1,10 @@
-ifeq ($(CONFIG_CAIF_DEBUG),1)
-CAIF_DBG_FLAGS := -DDEBUG
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
endif
-KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
-
-ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
-clean-dirs:= .tmp_versions
-clean-files:= Module.symvers modules.order *.cmd *~ \
-
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
+
+# SPI slave physical interfaces module
+cfspi_slave-objs := caif_spi.o caif_spi_slave.o
+obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 3e706f0..3df0c0f 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -403,7 +403,6 @@ static void caifdev_setup(struct net_device *dev)
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CAIF_MAX_MTU;
- dev->hard_header_len = CAIF_NEEDED_HEADROOM;
dev->tx_queue_len = 0;
dev->destructor = free_netdev;
skb_queue_head_init(&serdev->head);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
new file mode 100644
index 0000000..03049e8
--- /dev/null
+++ b/drivers/net/caif/caif_spi.c
@@ -0,0 +1,847 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_spi.h>
+
+#ifndef CONFIG_CAIF_SPI_SYNC
+#define FLAVOR "Flavour: Vanilla.\n"
+#else
+#define FLAVOR "Flavour: Master CMD&LEN at start.\n"
+#endif /* CONFIG_CAIF_SPI_SYNC */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
+MODULE_DESCRIPTION("CAIF SPI driver");
+
+static int spi_loop;
+module_param(spi_loop, bool, S_IRUGO);
+MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
+
+/* SPI frame alignment. */
+module_param(spi_frm_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
+
+/* SPI padding options. */
+module_param(spi_up_head_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
+
+module_param(spi_up_tail_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
+
+module_param(spi_down_head_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
+
+module_param(spi_down_tail_align, int, S_IRUGO);
+MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
+
+#ifdef CONFIG_ARM
+#define BYTE_HEX_FMT "%02X"
+#else
+#define BYTE_HEX_FMT "%02hhX"
+#endif
+
+#define SPI_MAX_PAYLOAD_SIZE 4096
+/*
+ * Threshold values for the SPI packet queue. Flow control will be asserted
+ * when the number of packets exceeds HIGH_WATER_MARK. It will not be
+ * deasserted before the number of packets drops below LOW_WATER_MARK.
+ */
+#define LOW_WATER_MARK 100
+#define HIGH_WATER_MARK (LOW_WATER_MARK*5)
+
+#ifdef CONFIG_UML
+
+/*
+ * We sometimes use UML for debugging, but it cannot handle
+ * dma_alloc_coherent so we have to wrap it.
+ */
+static inline void *dma_alloc(dma_addr_t *daddr)
+{
+ return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
+}
+
+static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+{
+ kfree(cpu_addr);
+}
+
+#else
+
+static inline void *dma_alloc(dma_addr_t *daddr)
+{
+ return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
+ GFP_KERNEL);
+}
+
+static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+{
+ dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
+}
+#endif /* CONFIG_UML */
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DEBUGFS_BUF_SIZE 4096
+
+static struct dentry *dbgfs_root;
+
+static inline void driver_debugfs_create(void)
+{
+ dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL);
+}
+
+static inline void driver_debugfs_remove(void)
+{
+ debugfs_remove(dbgfs_root);
+}
+
+static inline void dev_debugfs_rem(struct cfspi *cfspi)
+{
+ debugfs_remove(cfspi->dbgfs_frame);
+ debugfs_remove(cfspi->dbgfs_state);
+ debugfs_remove(cfspi->dbgfs_dir);
+}
+
+static int dbgfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int len = 0;
+ ssize_t size;
+ struct cfspi *cfspi = (struct cfspi *)file->private_data;
+
+ buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ /* Print out debug information. */
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "CAIF SPI debug information:\n");
+
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
+
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "STATE: %d\n", cfspi->dbg_state);
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Previous CMD: 0x%x\n", cfspi->pcmd);
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Current CMD: 0x%x\n", cfspi->cmd);
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Previous TX len: %d\n", cfspi->tx_ppck_len);
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Previous RX len: %d\n", cfspi->rx_ppck_len);
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Current TX len: %d\n", cfspi->tx_cpck_len);
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Current RX len: %d\n", cfspi->rx_cpck_len);
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Next TX len: %d\n", cfspi->tx_npck_len);
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Next RX len: %d\n", cfspi->rx_npck_len);
+
+ size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return size;
+}
+
+static ssize_t print_frame(char *buf, size_t size, char *frm,
+ size_t count, size_t cut)
+{
+ int len = 0;
+ int i;
+ for (i = 0; i < count; i++) {
+ len += snprintf((buf + len), (size - len),
+ "[0x" BYTE_HEX_FMT "]",
+ frm[i]);
+ if ((i == cut) && (count > (cut * 2))) {
+ /* Fast forward. */
+ i = count - cut;
+ len += snprintf((buf + len), (size - len),
+ "--- %u bytes skipped ---\n",
+ (int)(count - (cut * 2)));
+ }
+
+ if ((!(i % 10)) && i) {
+ len += snprintf((buf + len), (size - len),
+ "\n");
+ }
+ }
+ len += snprintf((buf + len), (size - len), "\n");
+ return len;
+}
+
+static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int len = 0;
+ ssize_t size;
+ struct cfspi *cfspi;
+
+ cfspi = (struct cfspi *)file->private_data;
+ buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ /* Print out debug information. */
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Current frame:\n");
+
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
+
+ len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
+ cfspi->xfer.va_tx,
+ (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
+
+ len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+ "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
+
+ len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
+ cfspi->xfer.va_rx,
+ (cfspi->rx_cpck_len + SPI_CMD_SZ), 100);
+
+ size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return size;
+}
+
+static const struct file_operations dbgfs_state_fops = {
+ .open = dbgfs_open,
+ .read = dbgfs_state,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations dbgfs_frame_fops = {
+ .open = dbgfs_open,
+ .read = dbgfs_frame,
+ .owner = THIS_MODULE
+};
+
+static inline void dev_debugfs_add(struct cfspi *cfspi)
+{
+ cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
+ cfspi->dbgfs_state = debugfs_create_file("state", S_IRUGO,
+ cfspi->dbgfs_dir, cfspi,
+ &dbgfs_state_fops);
+ cfspi->dbgfs_frame = debugfs_create_file("frame", S_IRUGO,
+ cfspi->dbgfs_dir, cfspi,
+ &dbgfs_frame_fops);
+}
+
+inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
+{
+ cfspi->dbg_state = state;
+};
+#else
+
+static inline void driver_debugfs_create(void)
+{
+}
+
+static inline void driver_debugfs_remove(void)
+{
+}
+
+static inline void dev_debugfs_add(struct cfspi *cfspi)
+{
+}
+
+static inline void dev_debugfs_rem(struct cfspi *cfspi)
+{
+}
+
+inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static LIST_HEAD(cfspi_list);
+static spinlock_t cfspi_list_lock;
+
+/* SPI uplink head alignment. */
+static ssize_t show_up_head_align(struct device_driver *driver, char *buf)
+{
+ return sprintf(buf, "%d\n", spi_up_head_align);
+}
+
+static DRIVER_ATTR(up_head_align, S_IRUSR, show_up_head_align, NULL);
+
+/* SPI uplink tail alignment. */
+static ssize_t show_up_tail_align(struct device_driver *driver, char *buf)
+{
+ return sprintf(buf, "%d\n", spi_up_tail_align);
+}
+
+static DRIVER_ATTR(up_tail_align, S_IRUSR, show_up_tail_align, NULL);
+
+/* SPI downlink head alignment. */
+static ssize_t show_down_head_align(struct device_driver *driver, char *buf)
+{
+ return sprintf(buf, "%d\n", spi_down_head_align);
+}
+
+static DRIVER_ATTR(down_head_align, S_IRUSR, show_down_head_align, NULL);
+
+/* SPI downlink tail alignment. */
+static ssize_t show_down_tail_align(struct device_driver *driver, char *buf)
+{
+ return sprintf(buf, "%d\n", spi_down_tail_align);
+}
+
+static DRIVER_ATTR(down_tail_align, S_IRUSR, show_down_tail_align, NULL);
+
+/* SPI frame alignment. */
+static ssize_t show_frame_align(struct device_driver *driver, char *buf)
+{
+ return sprintf(buf, "%d\n", spi_frm_align);
+}
+
+static DRIVER_ATTR(frame_align, S_IRUSR, show_frame_align, NULL);
+
+int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
+{
+ u8 *dst = buf;
+ caif_assert(buf);
+
+ do {
+ struct sk_buff *skb;
+ struct caif_payload_info *info;
+ int spad = 0;
+ int epad;
+
+ skb = skb_dequeue(&cfspi->chead);
+ if (!skb)
+ break;
+
+ /*
+ * Calculate length of frame including SPI padding.
+ * The payload position is found in the control buffer.
+ */
+ info = (struct caif_payload_info *)&skb->cb;
+
+ /*
+ * Compute head offset i.e. number of bytes to add to
+ * get the start of the payload aligned.
+ */
+ if (spi_up_head_align) {
+ spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
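+ /* The first pad byte stores the number of pad bytes that follow it. */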
+ *dst = (u8)(spad - 1);
+ dst += spad;
+ }
+
+ /* Copy in CAIF frame. */
+ skb_copy_bits(skb, 0, dst, skb->len);
+ dst += skb->len;
+ cfspi->ndev->stats.tx_packets++;
+ cfspi->ndev->stats.tx_bytes += skb->len;
+
+ /*
+ * Compute tail offset i.e. number of bytes to add to
+ * get the complete CAIF frame aligned.
+ */
+ epad = (skb->len + spad) & spi_up_tail_align;
+ dst += epad;
+
+ dev_kfree_skb(skb);
+
+ } while ((dst - buf) < len);
+
+ return dst - buf;
+}
+
+int cfspi_xmitlen(struct cfspi *cfspi)
+{
+ struct sk_buff *skb = NULL;
+ int frm_len = 0;
+ int pkts = 0;
+
+ /*
+ * Decommit previously committed frames.
+ * skb_queue_splice_tail(&cfspi->chead, &cfspi->qhead)
+ */
+ while (skb_peek(&cfspi->chead)) {
+ skb = skb_dequeue_tail(&cfspi->chead);
+ skb_queue_head(&cfspi->qhead, skb);
+ }
+
+ do {
+ struct caif_payload_info *info = NULL;
+ int spad = 0;
+ int epad = 0;
+
+ skb = skb_dequeue(&cfspi->qhead);
+ if (!skb)
+ break;
+
+ /*
+ * Calculate length of frame including SPI padding.
+ * The payload position is found in the control buffer.
+ */
+ info = (struct caif_payload_info *)&skb->cb;
+
+ /*
+ * Compute head offset i.e. number of bytes to add to
+ * get the start of the payload aligned.
+ */
+ if (spi_up_head_align)
+ spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+
+ /*
+ * Compute tail offset i.e. number of bytes to add to
+ * get the complete CAIF frame aligned.
+ */
+ epad = (skb->len + spad) & spi_up_tail_align;
+
+ if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
+ skb_queue_tail(&cfspi->chead, skb);
+ pkts++;
+ frm_len += skb->len + spad + epad;
+ } else {
+ /* Put back packet. */
+ skb_queue_head(&cfspi->qhead, skb);
+ }
+ } while (pkts <= CAIF_MAX_SPI_PKTS);
+
+ /*
+ * Send flow-on if flow-off was previously sent and the queue
+ * length has now dropped below the low water mark.
+ */
+ if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
+ cfspi->cfdev.flowctrl) {
+ cfspi->flow_off_sent = 0;
+ cfspi->cfdev.flowctrl(cfspi->ndev, 1);
+ }
+
+ return frm_len;
+}
+
+static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
+{
+ struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+
+ if (!in_interrupt())
+ spin_lock(&cfspi->lock);
+ if (assert) {
+ set_bit(SPI_SS_ON, &cfspi->state);
+ set_bit(SPI_XFER, &cfspi->state);
+ } else {
+ set_bit(SPI_SS_OFF, &cfspi->state);
+ }
+ if (!in_interrupt())
+ spin_unlock(&cfspi->lock);
+
+ /* Wake up the xfer thread. */
+ wake_up_interruptible(&cfspi->wait);
+}
+
+static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
+{
+ struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+
+ /* Transfer done; complete to wake the xfer work. */
+ complete(&cfspi->comp);
+}
+
+static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cfspi *cfspi = NULL;
+ unsigned long flags;
+ if (!dev)
+ return -EINVAL;
+
+ cfspi = netdev_priv(dev);
+
+ skb_queue_tail(&cfspi->qhead, skb);
+
+ spin_lock_irqsave(&cfspi->lock, flags);
+ if (!test_and_set_bit(SPI_XFER, &cfspi->state)) {
+ /* Wake up xfer thread. */
+ wake_up_interruptible(&cfspi->wait);
+ }
+ spin_unlock_irqrestore(&cfspi->lock, flags);
+
+ /* Send flow-off if the number of queued packets is above the high water mark */
+ if (!cfspi->flow_off_sent &&
+ cfspi->qhead.qlen > cfspi->qd_high_mark &&
+ cfspi->cfdev.flowctrl) {
+ cfspi->flow_off_sent = 1;
+ cfspi->cfdev.flowctrl(cfspi->ndev, 0);
+ }
+
+ return 0;
+}
+
+int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
+{
+ u8 *src = buf;
+
+ caif_assert(buf != NULL);
+
+ do {
+ int res;
+ struct sk_buff *skb = NULL;
+ int spad = 0;
+ int epad = 0;
+ u8 *dst = NULL;
+ int pkt_len = 0;
+
+ /*
+ * Compute head offset i.e. number of bytes added to
+ * get the start of the payload aligned.
+ */
+ if (spi_down_head_align) {
+ spad = 1 + *src;
+ src += spad;
+ }
+
+ /* Read length of CAIF frame (little endian). */
+ pkt_len = *src;
+ pkt_len |= ((*(src+1)) << 8) & 0xFF00;
+ pkt_len += 2; /* Add FCS fields. */
+
+ /* Get a suitable caif packet and copy in data. */
+
+ skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
+ caif_assert(skb != NULL);
+
+ dst = skb_put(skb, pkt_len);
+ memcpy(dst, src, pkt_len);
+ src += pkt_len;
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = cfspi->ndev;
+
+ /*
+ * Push received packet up the stack.
+ */
+ if (!spi_loop)
+ res = netif_rx_ni(skb);
+ else
+ res = cfspi_xmit(skb, cfspi->ndev);
+
+ if (!res) {
+ cfspi->ndev->stats.rx_packets++;
+ cfspi->ndev->stats.rx_bytes += pkt_len;
+ } else
+ cfspi->ndev->stats.rx_dropped++;
+
+ /*
+ * Compute tail offset i.e. number of bytes added to
+ * get the complete CAIF frame aligned.
+ */
+ epad = (pkt_len + spad) & spi_down_tail_align;
+ src += epad;
+ } while ((src - buf) < len);
+
+ return src - buf;
+}
+
+static int cfspi_open(struct net_device *dev)
+{
+ netif_wake_queue(dev);
+ return 0;
+}
+
+static int cfspi_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+static const struct net_device_ops cfspi_ops = {
+ .ndo_open = cfspi_open,
+ .ndo_stop = cfspi_close,
+ .ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &cfspi_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+ dev->tx_queue_len = 0;
+ dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&cfspi->qhead);
+ skb_queue_head_init(&cfspi->chead);
+ cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+ cfspi->cfdev.use_frag = false;
+ cfspi->cfdev.use_stx = false;
+ cfspi->cfdev.use_fcs = false;
+ cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+ struct cfspi *cfspi = NULL;
+ struct net_device *ndev;
+ struct cfspi_dev *dev;
+ int res;
+ dev = (struct cfspi_dev *)pdev->dev.platform_data;
+ if (!dev)
+ return -ENODEV;
+
+ ndev = alloc_netdev(sizeof(struct cfspi),
+ "cfspi%d", cfspi_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ cfspi = netdev_priv(ndev);
+ netif_stop_queue(ndev);
+ cfspi->ndev = ndev;
+ cfspi->pdev = pdev;
+
+ /* Set flow info */
+ cfspi->flow_off_sent = 0;
+ cfspi->qd_low_mark = LOW_WATER_MARK;
+ cfspi->qd_high_mark = HIGH_WATER_MARK;
+
+ /* Assign the SPI device. */
+ cfspi->dev = dev;
+ /* Assign the device ifc to this SPI interface. */
+ dev->ifc = &cfspi->ifc;
+
+ /* Allocate DMA buffers. */
+ cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
+ if (!cfspi->xfer.va_tx) {
+ printk(KERN_WARNING
+ "CFSPI: failed to allocate dma TX buffer.\n");
+ res = -ENODEV;
+ goto err_dma_alloc_tx;
+ }
+
+ cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
+
+ if (!cfspi->xfer.va_rx) {
+ printk(KERN_WARNING
+ "CFSPI: failed to allocate dma TX buffer.\n");
+ res = -ENODEV;
+ goto err_dma_alloc_rx;
+ }
+
+ /* Initialize the work queue. */
+ INIT_WORK(&cfspi->work, cfspi_xfer);
+
+ /* Initialize spin locks. */
+ spin_lock_init(&cfspi->lock);
+
+ /* Initialize flow control state. */
+ cfspi->flow_stop = false;
+
+ /* Initialize wait queue. */
+ init_waitqueue_head(&cfspi->wait);
+
+ /* Create work thread. */
+ cfspi->wq = create_singlethread_workqueue(dev->name);
+ if (!cfspi->wq) {
+ printk(KERN_WARNING "CFSPI: failed to create work queue.\n");
+ res = -ENODEV;
+ goto err_create_wq;
+ }
+
+ /* Initialize the xfer completion. */
+ init_completion(&cfspi->comp);
+
+ /* Create debugfs entries. */
+ dev_debugfs_add(cfspi);
+
+ /* Set up the ifc. */
+ cfspi->ifc.ss_cb = cfspi_ss_cb;
+ cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb;
+ cfspi->ifc.priv = cfspi;
+
+ /* Add CAIF SPI device to list. */
+ spin_lock(&cfspi_list_lock);
+ list_add_tail(&cfspi->list, &cfspi_list);
+ spin_unlock(&cfspi_list_lock);
+
+ /* Schedule the work queue. */
+ queue_work(cfspi->wq, &cfspi->work);
+
+ /* Register network device. */
+ res = register_netdev(ndev);
+ if (res) {
+ printk(KERN_ERR "CFSPI: Reg. error: %d.\n", res);
+ goto err_net_reg;
+ }
+ return res;
+
+ err_net_reg:
+ dev_debugfs_rem(cfspi);
+ set_bit(SPI_TERMINATE, &cfspi->state);
+ wake_up_interruptible(&cfspi->wait);
+ destroy_workqueue(cfspi->wq);
+ err_create_wq:
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+ dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
+ err_dma_alloc_tx:
+ free_netdev(ndev);
+
+ return res;
+}
+
+int cfspi_spi_remove(struct platform_device *pdev)
+{
+ struct list_head *list_node;
+ struct list_head *n;
+ struct cfspi *cfspi = NULL;
+ struct cfspi_dev *dev;
+
+ dev = (struct cfspi_dev *)pdev->dev.platform_data;
+ spin_lock(&cfspi_list_lock);
+ list_for_each_safe(list_node, n, &cfspi_list) {
+ cfspi = list_entry(list_node, struct cfspi, list);
+ /* Find the corresponding device. */
+ if (cfspi->dev == dev) {
+ /* Remove from list. */
+ list_del(list_node);
+ /* Free DMA buffers. */
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
+ set_bit(SPI_TERMINATE, &cfspi->state);
+ wake_up_interruptible(&cfspi->wait);
+ destroy_workqueue(cfspi->wq);
+ /* Destroy debugfs directory and files. */
+ dev_debugfs_rem(cfspi);
+ unregister_netdev(cfspi->ndev);
+ spin_unlock(&cfspi_list_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&cfspi_list_lock);
+ return -ENODEV;
+}
+
+static void __exit cfspi_exit_module(void)
+{
+ struct list_head *list_node;
+ struct list_head *n;
+ struct cfspi *cfspi = NULL;
+
+ list_for_each_safe(list_node, n, &cfspi_list) {
+ cfspi = list_entry(list_node, struct cfspi, list);
+ platform_device_unregister(cfspi->pdev);
+ }
+
+ /* Destroy sysfs files. */
+ driver_remove_file(&cfspi_spi_driver.driver,
+ &driver_attr_up_head_align);
+ driver_remove_file(&cfspi_spi_driver.driver,
+ &driver_attr_up_tail_align);
+ driver_remove_file(&cfspi_spi_driver.driver,
+ &driver_attr_down_head_align);
+ driver_remove_file(&cfspi_spi_driver.driver,
+ &driver_attr_down_tail_align);
+ driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align);
+ /* Unregister platform driver. */
+ platform_driver_unregister(&cfspi_spi_driver);
+ /* Destroy debugfs root directory. */
+ driver_debugfs_remove();
+}
+
+static int __init cfspi_init_module(void)
+{
+ int result;
+
+ /* Initialize spin lock. */
+ spin_lock_init(&cfspi_list_lock);
+
+ /* Register platform driver. */
+ result = platform_driver_register(&cfspi_spi_driver);
+ if (result) {
+ printk(KERN_ERR "Could not register platform SPI driver.\n");
+ goto err_dev_register;
+ }
+
+ /* Create sysfs files. */
+ result =
+ driver_create_file(&cfspi_spi_driver.driver,
+ &driver_attr_up_head_align);
+ if (result) {
+ printk(KERN_ERR "Sysfs creation failed 1.\n");
+ goto err_create_up_head_align;
+ }
+
+ result =
+ driver_create_file(&cfspi_spi_driver.driver,
+ &driver_attr_up_tail_align);
+ if (result) {
+ printk(KERN_ERR "Sysfs creation failed 2.\n");
+ goto err_create_up_tail_align;
+ }
+
+ result =
+ driver_create_file(&cfspi_spi_driver.driver,
+ &driver_attr_down_head_align);
+ if (result) {
+ printk(KERN_ERR "Sysfs creation failed 3.\n");
+ goto err_create_down_head_align;
+ }
+
+ result =
+ driver_create_file(&cfspi_spi_driver.driver,
+ &driver_attr_down_tail_align);
+ if (result) {
+ printk(KERN_ERR "Sysfs creation failed 4.\n");
+ goto err_create_down_tail_align;
+ }
+
+ result =
+ driver_create_file(&cfspi_spi_driver.driver,
+ &driver_attr_frame_align);
+ if (result) {
+ printk(KERN_ERR "Sysfs creation failed 5.\n");
+ goto err_create_frame_align;
+ }
+ driver_debugfs_create();
+ return result;
+
+ err_create_frame_align:
+ driver_remove_file(&cfspi_spi_driver.driver,
+ &driver_attr_down_tail_align);
+ err_create_down_tail_align:
+ driver_remove_file(&cfspi_spi_driver.driver,
+ &driver_attr_down_head_align);
+ err_create_down_head_align:
+ driver_remove_file(&cfspi_spi_driver.driver,
+ &driver_attr_up_tail_align);
+ err_create_up_tail_align:
+ driver_remove_file(&cfspi_spi_driver.driver,
+ &driver_attr_up_head_align);
+ err_create_up_head_align:
+ err_dev_register:
+ return result;
+}
+
+module_init(cfspi_init_module);
+module_exit(cfspi_exit_module);
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
new file mode 100644
index 0000000..077ccf8
--- /dev/null
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <net/caif/caif_spi.h>
+
+#ifndef CONFIG_CAIF_SPI_SYNC
+#define SPI_DATA_POS SPI_CMD_SZ
+static inline int forward_to_spi_cmd(struct cfspi *cfspi)
+{
+ return cfspi->rx_cpck_len;
+}
+#else
+#define SPI_DATA_POS 0
+static inline int forward_to_spi_cmd(struct cfspi *cfspi)
+{
+ return 0;
+}
+#endif
+
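+/* SPI frame alignment and padding defaults; exposed as module parameters by caif_spi.c. */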
+int spi_frm_align = 2;
+int spi_up_head_align = 1;
+int spi_up_tail_align;
+int spi_down_head_align = 3;
+int spi_down_tail_align = 1;
+
+#ifdef CONFIG_DEBUG_FS
+static inline void debugfs_store_prev(struct cfspi *cfspi)
+{
+ /* Store previous command for debugging reasons. */
+ cfspi->pcmd = cfspi->cmd;
+ /* Store previous transfer. */
+ cfspi->tx_ppck_len = cfspi->tx_cpck_len;
+ cfspi->rx_ppck_len = cfspi->rx_cpck_len;
+}
+#else
+static inline void debugfs_store_prev(struct cfspi *cfspi)
+{
+}
+#endif
+
+void cfspi_xfer(struct work_struct *work)
+{
+ struct cfspi *cfspi;
+ u8 *ptr = NULL;
+ unsigned long flags;
+ int ret;
+ cfspi = container_of(work, struct cfspi, work);
+
+ /* Initialize state. */
+ cfspi->cmd = SPI_CMD_EOT;
+
+ for (;;) {
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);
+
+ /* Wait for master talk or transmit event. */
+ wait_event_interruptible(cfspi->wait,
+ test_bit(SPI_XFER, &cfspi->state) ||
+ test_bit(SPI_TERMINATE, &cfspi->state));
+
+ if (test_bit(SPI_TERMINATE, &cfspi->state))
+ return;
+
+#if CFSPI_DBG_PREFILL
+ /* Prefill buffers for easier debugging. */
+ memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
+ memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
+#endif /* CFSPI_DBG_PREFILL */
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);
+
+ /* Check whether we have a committed frame. */
+ if (cfspi->tx_cpck_len) {
+ int len;
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);
+
+ /* Copy committed SPI frames after the SPI indication. */
+ ptr = (u8 *) cfspi->xfer.va_tx;
+ ptr += SPI_IND_SZ;
+ len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
+ WARN_ON(len != cfspi->tx_cpck_len);
+ }
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);
+
+ /* Get length of next frame to commit. */
+ cfspi->tx_npck_len = cfspi_xmitlen(cfspi);
+
+ WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);
+
+ /*
+ * Add indication and length at the beginning of the frame,
+ * using little endian.
+ */
+ ptr = (u8 *) cfspi->xfer.va_tx;
+ *ptr++ = SPI_CMD_IND;
+ *ptr++ = (SPI_CMD_IND & 0xFF00) >> 8;
+ *ptr++ = cfspi->tx_npck_len & 0x00FF;
+ *ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;
+
+ /* Calculate length of DMAs. */
+ cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
+ cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;
+
+ /* Add SPI TX frame alignment padding, if necessary. */
+ if (cfspi->tx_cpck_len &&
+ (cfspi->xfer.tx_dma_len % spi_frm_align)) {
+
+ cfspi->xfer.tx_dma_len += spi_frm_align -
+ (cfspi->xfer.tx_dma_len % spi_frm_align);
+ }
+
+ /* Add SPI RX frame alignment padding, if necessary. */
+ if (cfspi->rx_cpck_len &&
+ (cfspi->xfer.rx_dma_len % spi_frm_align)) {
+
+ cfspi->xfer.rx_dma_len += spi_frm_align -
+ (cfspi->xfer.rx_dma_len % spi_frm_align);
+ }
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);
+
+ /* Start transfer. */
+ ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
+ WARN_ON(ret);
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);
+
+ /*
+ * TODO: We might be able to make an assumption if this is the
+ * first loop. Make sure that minimum toggle time is respected.
+ */
+ udelay(MIN_TRANSITION_TIME_USEC);
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);
+
+ /* Signal that we are ready to receive data. */
+ cfspi->dev->sig_xfer(true, cfspi->dev);
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);
+
+ /* Wait for transfer completion. */
+ wait_for_completion(&cfspi->comp);
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);
+
+ if (cfspi->cmd == SPI_CMD_EOT) {
+ /*
+ * Clear the master talk bit. An xfer is always at
+ * least two bursts.
+ */
+ clear_bit(SPI_SS_ON, &cfspi->state);
+ }
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);
+
+ /* Make sure that the minimum toggle time is respected. */
+ if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
+ cfspi->dev->clk_mhz) <
+ MIN_TRANSITION_TIME_USEC) {
+
+ udelay(MIN_TRANSITION_TIME_USEC -
+ SPI_XFER_TIME_USEC
+ (cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
+ }
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);
+
+ /* De-assert transfer signal. */
+ cfspi->dev->sig_xfer(false, cfspi->dev);
+
+ /* Check whether we received a CAIF packet. */
+ if (cfspi->rx_cpck_len) {
+ int len;
+
+ cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);
+
+ /* Parse SPI frame. */
+ ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));
+
+ len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
+ WARN_ON(len != cfspi->rx_cpck_len);
+ }
+
+ /* Check the next SPI command and length. */
+ ptr = (u8 *) cfspi->xfer.va_rx;
+
+ ptr += forward_to_spi_cmd(cfspi);
+
+ cfspi->cmd = *ptr++;
+ cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
+ cfspi->rx_npck_len = *ptr++;
+ cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;
+
+ WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
+ WARN_ON(cfspi->cmd > SPI_CMD_EOT);
+
+ debugfs_store_prev(cfspi);
+
+ /* Check whether the master issued an EOT command. */
+ if (cfspi->cmd == SPI_CMD_EOT) {
+ /* Reset state. */
+ cfspi->tx_cpck_len = 0;
+ cfspi->rx_cpck_len = 0;
+ } else {
+ /* Update state. */
+ cfspi->tx_cpck_len = cfspi->tx_npck_len;
+ cfspi->rx_cpck_len = cfspi->rx_npck_len;
+ }
+
+ /*
+ * Check whether we need to clear the xfer bit.
+ * Spin lock needed for packet insertion.
+ * Test and clear of different bits
+ * are not supported.
+ */
+ spin_lock_irqsave(&cfspi->lock, flags);
+ if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi)
+ && !test_bit(SPI_SS_ON, &cfspi->state))
+ clear_bit(SPI_XFER, &cfspi->state);
+
+ spin_unlock_irqrestore(&cfspi->lock, flags);
+ }
+}
+
+struct platform_driver cfspi_spi_driver = {
+ .probe = cfspi_spi_probe,
+ .remove = cfspi_spi_remove,
+ .driver = {
+ .name = "cfspi_sspi",
+ .owner = THIS_MODULE,
+ },
+};
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 908d89a..5ecf0bc 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -257,7 +257,7 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
- for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+ for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
return 0;
@@ -804,7 +804,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
cnic_free_dma(dev, &cp->conn_buf_info);
cnic_free_dma(dev, &cp->kwq_info);
cnic_free_dma(dev, &cp->kwq_16_data_info);
- cnic_free_dma(dev, &cp->kcq_info);
+ cnic_free_dma(dev, &cp->kcq1.dma);
kfree(cp->iscsi_tbl);
cp->iscsi_tbl = NULL;
kfree(cp->ctx_tbl);
@@ -863,6 +863,37 @@ static int cnic_alloc_context(struct cnic_dev *dev)
return 0;
}
+static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
+{
+ int err, i, is_bnx2 = 0;
+ struct kcqe **kcq;
+
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
+ is_bnx2 = 1;
+
+ err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
+ if (err)
+ return err;
+
+ kcq = (struct kcqe **) info->dma.pg_arr;
+ info->kcq = kcq;
+
+ if (is_bnx2)
+ return 0;
+
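+ /* Link the last BD of each KCQ page to the start of the next page, wrapping to form a ring. */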
+ for (i = 0; i < KCQ_PAGE_CNT; i++) {
+ struct bnx2x_bd_chain_next *next =
+ (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
+ int j = i + 1;
+
+ if (j >= KCQ_PAGE_CNT)
+ j = 0;
+ next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
+ next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
+ }
+ return 0;
+}
+
static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -954,10 +985,9 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
goto error;
cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
- ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
+ ret = cnic_alloc_kcq(dev, &cp->kcq1);
if (ret)
goto error;
- cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
ret = cnic_alloc_context(dev);
if (ret)
@@ -981,17 +1011,10 @@ error:
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- struct cnic_eth_dev *ethdev = cp->ethdev;
int ctx_blk_size = cp->ethdev->ctx_blk_size;
- int total_mem, blks, i, cid_space;
+ int total_mem, blks, i;
- if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
- return -EINVAL;
-
- cid_space = MAX_ISCSI_TBL_SZ +
- (BNX2X_ISCSI_START_CID - ethdev->starting_cid);
-
- total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
+ total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
blks = total_mem / ctx_blk_size;
if (total_mem % ctx_blk_size)
blks++;
@@ -1035,16 +1058,27 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ u32 start_cid = ethdev->starting_cid;
int i, j, n, ret, pages;
struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
+ cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+ cp->iscsi_start_cid = start_cid;
+ if (start_cid < BNX2X_ISCSI_START_CID) {
+ u32 delta = BNX2X_ISCSI_START_CID - start_cid;
+
+ cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
+ cp->max_cid_space += delta;
+ }
+
cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
GFP_KERNEL);
if (!cp->iscsi_tbl)
goto error;
cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
- MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
+ cp->max_cid_space, GFP_KERNEL);
if (!cp->ctx_tbl)
goto error;
@@ -1053,7 +1087,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
}
- pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
+ pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
@@ -1061,7 +1095,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
return -ENOMEM;
n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
- for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+ for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
@@ -1072,22 +1106,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
j++;
}
- ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
+ ret = cnic_alloc_kcq(dev, &cp->kcq1);
if (ret)
goto error;
- cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
-
- for (i = 0; i < KCQ_PAGE_CNT; i++) {
- struct bnx2x_bd_chain_next *next =
- (struct bnx2x_bd_chain_next *)
- &cp->kcq[i][MAX_KCQE_CNT];
- int j = i + 1;
-
- if (j >= KCQ_PAGE_CNT)
- j = 0;
- next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
- next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
- }
pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
@@ -2120,18 +2141,20 @@ static u16 cnic_bnx2x_hw_idx(u16 idx)
return idx;
}
-static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
+static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
struct cnic_local *cp = dev->cnic_priv;
- u16 i, ri, last;
+ u16 i, ri, hw_prod, last;
struct kcqe *kcqe;
int kcqe_cnt = 0, last_cnt = 0;
- i = ri = last = *sw_prod;
+ i = ri = last = info->sw_prod_idx;
ri &= MAX_KCQ_IDX;
+ hw_prod = *info->hw_prod_idx_ptr;
+ hw_prod = cp->hw_idx(hw_prod);
while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
- kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+ kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
cp->completed_kcq[kcqe_cnt++] = kcqe;
i = cp->next_idx(i);
ri = i & MAX_KCQ_IDX;
@@ -2141,7 +2164,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
}
}
- *sw_prod = last;
+ info->sw_prod_idx = last;
return last_cnt;
}
@@ -2184,6 +2207,9 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp)
u16 tx_cons = *cp->tx_cons_ptr;
int comp = 0;
+ if (!test_bit(CNIC_F_CNIC_UP, &cp->dev->flags))
+ return;
+
if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
comp = cnic_l2_completion(cp);
@@ -2197,103 +2223,79 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp)
clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
-static int cnic_service_bnx2(void *data, void *status_blk)
+static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
- struct cnic_dev *dev = data;
- struct status_block *sblk = status_blk;
struct cnic_local *cp = dev->cnic_priv;
- u32 status_idx = sblk->status_idx;
- u16 hw_prod, sw_prod;
+ u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
int kcqe_cnt;
- if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
- return status_idx;
-
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
- hw_prod = sblk->status_completion_producer_index;
- sw_prod = cp->kcq_prod_idx;
- while (sw_prod != hw_prod) {
- kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
- if (kcqe_cnt == 0)
- goto done;
+ while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
service_kcqes(dev, kcqe_cnt);
/* Tell compiler that status_blk fields can change. */
barrier();
- if (status_idx != sblk->status_idx) {
- status_idx = sblk->status_idx;
+ if (status_idx != *cp->kcq1.status_idx_ptr) {
+ status_idx = (u16) *cp->kcq1.status_idx_ptr;
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
- hw_prod = sblk->status_completion_producer_index;
} else
break;
}
-done:
- CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
-
- cp->kcq_prod_idx = sw_prod;
+ CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
cnic_chk_pkt_rings(cp);
+
return status_idx;
}
-static void cnic_service_bnx2_msix(unsigned long data)
+static int cnic_service_bnx2(void *data, void *status_blk)
{
- struct cnic_dev *dev = (struct cnic_dev *) data;
+ struct cnic_dev *dev = data;
struct cnic_local *cp = dev->cnic_priv;
- struct status_block_msix *status_blk = cp->status_blk.bnx2;
- u32 status_idx = status_blk->status_idx;
- u16 hw_prod, sw_prod;
- int kcqe_cnt;
-
- cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+ u32 status_idx = *cp->kcq1.status_idx_ptr;
- hw_prod = status_blk->status_completion_producer_index;
- sw_prod = cp->kcq_prod_idx;
- while (sw_prod != hw_prod) {
- kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
- if (kcqe_cnt == 0)
- goto done;
-
- service_kcqes(dev, kcqe_cnt);
+ if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+ return status_idx;
- /* Tell compiler that status_blk fields can change. */
- barrier();
- if (status_idx != status_blk->status_idx) {
- status_idx = status_blk->status_idx;
- cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
- hw_prod = status_blk->status_completion_producer_index;
- } else
- break;
- }
+ return cnic_service_bnx2_queues(dev);
+}
-done:
- CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
- cp->kcq_prod_idx = sw_prod;
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+ struct cnic_dev *dev = (struct cnic_dev *) data;
+ struct cnic_local *cp = dev->cnic_priv;
- cnic_chk_pkt_rings(cp);
+ cp->last_status_idx = cnic_service_bnx2_queues(dev);
- cp->last_status_idx = status_idx;
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
+static void cnic_doirq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
+
+ if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+ prefetch(cp->status_blk.gen);
+ prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+ tasklet_schedule(&cp->cnic_irq_task);
+ }
+}
+
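The prefetch in cnic_doirq() relies on the kcq being paged: kcq[] holds per-page pointers and the producer index selects a page and an offset within it. A rough sketch of that lookup, with a hypothetical entries-per-page value standing in for the driver's real KCQ_PG()/KCQ_IDX() macros:

        /* Hypothetical sizing: 128 kcqe entries per page. */
        enum { KCQES_PER_PG_SKETCH = 128 };

        static struct kcqe *kcq_entry_sketch(struct kcqe **kcq, u16 prod)
        {
                return &kcq[prod / KCQES_PER_PG_SKETCH][prod % KCQES_PER_PG_SKETCH];
        }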
static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
struct cnic_dev *dev = dev_instance;
struct cnic_local *cp = dev->cnic_priv;
- u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
if (cp->ack_int)
cp->ack_int(dev);
- prefetch(cp->status_blk.gen);
- prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
-
- if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
- tasklet_schedule(&cp->cnic_irq_task);
+ cnic_doirq(dev);
return IRQ_HANDLED;
}
@@ -2324,60 +2326,50 @@ static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
IGU_INT_DISABLE, 0);
}
-static void cnic_service_bnx2x_bh(unsigned long data)
+static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
- struct cnic_dev *dev = (struct cnic_dev *) data;
- struct cnic_local *cp = dev->cnic_priv;
- u16 hw_prod, sw_prod;
- struct cstorm_status_block_c *sblk =
- &cp->status_blk.bnx2x->c_status_block;
- u32 status_idx = sblk->status_block_index;
+ u32 last_status = *info->status_idx_ptr;
int kcqe_cnt;
- if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
- return;
-
- hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
- hw_prod = cp->hw_idx(hw_prod);
- sw_prod = cp->kcq_prod_idx;
- while (sw_prod != hw_prod) {
- kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
- if (kcqe_cnt == 0)
- goto done;
+ while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
service_kcqes(dev, kcqe_cnt);
/* Tell compiler that sblk fields can change. */
barrier();
- if (status_idx == sblk->status_block_index)
+ if (last_status == *info->status_idx_ptr)
break;
- status_idx = sblk->status_block_index;
- hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
- hw_prod = cp->hw_idx(hw_prod);
+ last_status = *info->status_idx_ptr;
}
+ return last_status;
+}
-done:
- CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
+static void cnic_service_bnx2x_bh(unsigned long data)
+{
+ struct cnic_dev *dev = (struct cnic_dev *) data;
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 status_idx;
+
+ if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+ return;
+
+ status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+
+ CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
status_idx, IGU_INT_ENABLE, 1);
-
- cp->kcq_prod_idx = sw_prod;
}
static int cnic_service_bnx2x(void *data, void *status_blk)
{
struct cnic_dev *dev = data;
struct cnic_local *cp = dev->cnic_priv;
- u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
- if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
- prefetch(cp->status_blk.bnx2x);
- prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+ if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+ cnic_doirq(dev);
- tasklet_schedule(&cp->cnic_irq_task);
- cnic_chk_pkt_rings(cp);
- }
+ cnic_chk_pkt_rings(cp);
return 0;
}
@@ -2996,7 +2988,7 @@ err_out:
static int cnic_cm_abort(struct cnic_sock *csk)
{
struct cnic_local *cp = csk->dev->cnic_priv;
- u32 opcode;
+ u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
if (!cnic_in_use(csk))
return -EINVAL;
@@ -3008,12 +3000,9 @@ static int cnic_cm_abort(struct cnic_sock *csk)
* connect was not successful.
*/
- csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
- if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
- opcode = csk->state;
- else
- opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
cp->close_conn(csk, opcode);
+ if (csk->state != opcode)
+ return -EALREADY;
return 0;
}
@@ -3026,6 +3015,8 @@ static int cnic_cm_close(struct cnic_sock *csk)
if (cnic_close_prep(csk)) {
csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
return cnic_cm_close_req(csk);
+ } else {
+ return -EALREADY;
}
return 0;
}
@@ -3141,12 +3132,6 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
break;
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
- if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
- cnic_cm_upcall(cp, csk, opcode);
- break;
- } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
- csk->state = opcode;
- /* fall through */
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
@@ -3202,19 +3187,22 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
- if ((opcode == csk->state) ||
- (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
- csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
- if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
- return 1;
+ if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+ /* Unsolicited RESET_COMP or RESET_RECEIVED */
+ opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+ csk->state = opcode;
}
- /* 57710+ only workaround to handle unsolicited RESET_COMP
- * which will be treated like a RESET RCVD notification
- * which triggers the clean up procedure
+
+ /* 1. If event opcode matches the expected event in csk->state
+ * 2. If the expected event is CLOSE_COMP, we accept any event
+ * 3. If the expected event is 0, meaning the connection was never
+ * established, we accept the opcode from cm_abort.
*/
- else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
+ if (opcode == csk->state || csk->state == 0 ||
+ csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
- csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+ if (csk->state == 0)
+ csk->state = opcode;
return 1;
}
}
@@ -3226,8 +3214,14 @@ static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
+ if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
+ cnic_cm_upcall(cp, csk, opcode);
+ return;
+ }
+
clear_bit(SK_F_CONNECT_START, &csk->flags);
cnic_close_conn(csk);
+ csk->state = opcode;
cnic_cm_upcall(cp, csk, opcode);
}
@@ -3257,8 +3251,12 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
- if (cnic_ready_to_close(csk, opcode))
- cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+ if (cnic_ready_to_close(csk, opcode)) {
+ if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+ else
+ close_complete = 1;
+ }
break;
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
@@ -3694,7 +3692,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct status_block *sblk = cp->status_blk.gen;
- u32 val;
+ u32 val, kcq_cid_addr, kwq_cid_addr;
int err;
cnic_set_bnx2_mac(dev);
@@ -3719,7 +3717,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cnic_init_context(dev, KWQ_CID);
cnic_init_context(dev, KCQ_CID);
- cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+ kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
cp->max_kwq_idx = MAX_KWQ_IDX;
@@ -3735,50 +3733,59 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
(BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
- cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
- cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
- cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
- cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
val = (u32) cp->kwq_info.pgtbl_map;
- cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+ cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
- cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
- cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+ cp->kcq1.sw_prod_idx = 0;
+ cp->kcq1.hw_prod_idx_ptr =
+ (u16 *) &sblk->status_completion_producer_index;
- cp->kcq_prod_idx = 0;
+ cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
(BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
- cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
- cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
- cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
- val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
- cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+ val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
- val = (u32) cp->kcq_info.pgtbl_map;
- cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+ val = (u32) cp->kcq1.dma.pgtbl_map;
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
cp->int_num = 0;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ struct status_block_msix *msblk = cp->status_blk.bnx2;
u32 sb_id = cp->status_blk_num;
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
+ cp->kcq1.hw_prod_idx_ptr =
+ (u16 *) &msblk->status_completion_producer_index;
+ cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
+ cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
- cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
- cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
}
/* Enable Command Scheduler notification when we write to the
@@ -3919,8 +3926,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;
- context->xstorm_st_context.statistics_data = (cli |
- XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
+ if (cli < MAX_X_STAT_COUNTER_ID)
+ context->xstorm_st_context.statistics_data = cli |
+ XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE;
context->xstorm_ag_context.cdu_reserved =
CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
@@ -3928,10 +3936,12 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
ETH_CONNECTION_TYPE);
/* reset xstorm per client statistics */
- val = BAR_XSTRORM_INTMEM +
- XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
- for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
- CNIC_WR(dev, val + i * 4, 0);
+ if (cli < MAX_X_STAT_COUNTER_ID) {
+ val = BAR_XSTRORM_INTMEM +
+ XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
+ for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
+ CNIC_WR(dev, val + i * 4, 0);
+ }
cp->tx_cons_ptr =
&cp->bnx2x_def_status_blk->c_def_status_block.index_values[
@@ -3978,9 +3988,11 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
BNX2X_ISCSI_RX_SB_INDEX_NUM;
context->ustorm_st_context.common.clientId = cli;
context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
- context->ustorm_st_context.common.flags =
- USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
- context->ustorm_st_context.common.statistics_counter_id = cli;
+ if (cli < MAX_U_STAT_COUNTER_ID) {
+ context->ustorm_st_context.common.flags =
+ USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
+ context->ustorm_st_context.common.statistics_counter_id = cli;
+ }
context->ustorm_st_context.common.mc_alignment_log_size = 0;
context->ustorm_st_context.common.bd_buff_size =
cp->l2_single_buf_size;
@@ -4011,10 +4023,13 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
/* client tstorm info */
tstorm_client.mtu = cp->l2_single_buf_size - 14;
- tstorm_client.config_flags =
- (TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE |
- TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE);
- tstorm_client.statistics_counter_id = cli;
+ tstorm_client.config_flags = TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE;
+
+ if (cli < MAX_T_STAT_COUNTER_ID) {
+ tstorm_client.config_flags |=
+ TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
+ tstorm_client.statistics_counter_id = cli;
+ }
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
@@ -4024,16 +4039,21 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
((u32 *)&tstorm_client)[1]);
/* reset tstorm per client statistics */
- val = BAR_TSTRORM_INTMEM +
- TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
- for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
- CNIC_WR(dev, val + i * 4, 0);
+ if (cli < MAX_T_STAT_COUNTER_ID) {
+
+ val = BAR_TSTRORM_INTMEM +
+ TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
+ for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
+ CNIC_WR(dev, val + i * 4, 0);
+ }
/* reset ustorm per client statistics */
- val = BAR_USTRORM_INTMEM +
- USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
- for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
- CNIC_WR(dev, val + i * 4, 0);
+ if (cli < MAX_U_STAT_COUNTER_ID) {
+ val = BAR_USTRORM_INTMEM +
+ USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
+ for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
+ CNIC_WR(dev, val + i * 4, 0);
+ }
cp->rx_cons_ptr =
&cp->bnx2x_def_status_blk->u_def_status_block.index_values[
@@ -4110,33 +4130,39 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
u8 sb_id = cp->status_blk_num;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
- BNX2X_ISCSI_START_CID);
+ cp->iscsi_start_cid);
if (ret)
return -ENOMEM;
- cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
+ cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
- cp->kcq_prod_idx = 0;
+ cp->kcq1.sw_prod_idx = 0;
+
+ cp->kcq1.hw_prod_idx_ptr =
+ &cp->status_blk.bnx2x->c_status_block.index_values[
+ HC_INDEX_C_ISCSI_EQ_CONS];
+ cp->kcq1.status_idx_ptr =
+ &cp->status_blk.bnx2x->c_status_block.status_block_index;
cnic_get_bnx2x_iscsi_info(dev);
/* Only 1 EQ */
- CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
+ CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
- cp->kcq_info.pg_map_arr[1] & 0xffffffff);
+ cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
- (u64) cp->kcq_info.pg_map_arr[1] >> 32);
+ (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
- cp->kcq_info.pg_map_arr[0] & 0xffffffff);
+ cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
- (u64) cp->kcq_info.pg_map_arr[0] >> 32);
+ (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
@@ -4364,7 +4390,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
0);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
- CNIC_WR16(dev, cp->kcq_io_addr, 0);
+ CNIC_WR16(dev, cp->kcq1.io_addr, 0);
cnic_free_resc(dev);
}
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 08b1235..275c361 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -169,6 +169,16 @@ struct cnic_context {
} proto;
};
+struct kcq_info {
+ struct cnic_dma dma;
+ struct kcqe **kcq;
+
+ u16 *hw_prod_idx_ptr;
+ u16 sw_prod_idx;
+ u16 *status_idx_ptr;
+ u32 io_addr;
+};
+
struct cnic_local {
spinlock_t cnic_ulp_lock;
@@ -202,9 +212,6 @@ struct cnic_local {
u16 rx_cons;
u16 tx_cons;
- u32 kwq_cid_addr;
- u32 kcq_cid_addr;
-
struct cnic_dma kwq_info;
struct kwqe **kwq;
@@ -218,11 +225,7 @@ struct cnic_local {
u16 *kwq_con_idx_ptr;
u16 kwq_con_idx;
- struct cnic_dma kcq_info;
- struct kcqe **kcq;
-
- u16 kcq_prod_idx;
- u32 kcq_io_addr;
+ struct kcq_info kcq1;
union {
void *gen;
@@ -248,8 +251,10 @@ struct cnic_local {
struct cnic_iscsi *iscsi_tbl;
struct cnic_context *ctx_tbl;
struct cnic_id_tbl cid_tbl;
- int max_iscsi_conn;
atomic_t iscsi_conn;
+ u32 iscsi_start_cid;
+
+ u32 max_cid_space;
/* per connection parameters */
int num_iscsi_tasks;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 0c55177..344c842 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.1.2"
-#define CNIC_MODULE_RELDATE "May 26, 2010"
+#define CNIC_MODULE_VERSION "2.1.3"
+#define CNIC_MODULE_RELDATE "June 24, 2010"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 3c58db5..1756d28 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -964,7 +964,7 @@ static int cpmac_open(struct net_device *dev)
struct sk_buff *skb;
mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
- if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) {
+ if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: failed to request registers\n",
dev->name);
@@ -972,7 +972,7 @@ static int cpmac_open(struct net_device *dev)
goto fail_reserve;
}
- priv->regs = ioremap(mem->start, mem->end - mem->start);
+ priv->regs = ioremap(mem->start, resource_size(mem));
if (!priv->regs) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: failed to remap registers\n",
@@ -1049,7 +1049,7 @@ fail_alloc:
iounmap(priv->regs);
fail_remap:
- release_mem_region(mem->start, mem->end - mem->start);
+ release_mem_region(mem->start, resource_size(mem));
fail_reserve:
return res;
@@ -1077,7 +1077,7 @@ static int cpmac_stop(struct net_device *dev)
free_irq(dev->irq, dev);
iounmap(priv->regs);
mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
- release_mem_region(mem->start, mem->end - mem->start);
+ release_mem_region(mem->start, resource_size(mem));
priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
for (i = 0; i < priv->ring_size; i++) {
if (priv->rx_head[i].skb) {
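The resource_size() conversions in this hunk are more than cosmetic: struct resource ranges are inclusive, so mem->end - mem->start undercounts the window by one byte, whereas resource_size() (defined in include/linux/ioport.h as res->end - res->start + 1) requests and maps the full region.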
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 9d0bd9d..8bda06e 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,10 +35,10 @@
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
-#define DRV_VERSION "1.1.3-ko"
+#define DRV_VERSION "1.1.4-ko"
/* Firmware version */
#define FW_VERSION_MAJOR 7
-#define FW_VERSION_MINOR 4
+#define FW_VERSION_MINOR 10
#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index dd1770e..62804bb 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -219,6 +219,10 @@ struct adapter_params {
struct vpd_params vpd;
struct pci_params pci;
+ unsigned int sf_size; /* serial flash size in bytes */
+ unsigned int sf_nsec; /* # of flash sectors */
+ unsigned int sf_fw_start; /* start of FW image in flash */
+
unsigned int fw_vers;
unsigned int tp_vers;
u8 api_vers[7];
@@ -305,7 +309,6 @@ enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
- QUEUES_BOUND = (1 << 3),
FW_OK = (1 << 4),
};
@@ -646,6 +649,7 @@ void t4_intr_disable(struct adapter *adapter);
void t4_intr_clear(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
+int t4_wait_dev_ready(struct adapter *adap);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 58045b0..55a720e 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -77,6 +77,76 @@
*/
#define MAX_SGE_TIMERVAL 200U
+#ifdef CONFIG_PCI_IOV
+/*
+ * Virtual Function provisioning constants. We need two extra Ingress Queues
+ * with Interrupt capability to serve as the VF's Firmware Event Queue and
+ * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
+ * Lists associated with them). For each Ethernet/Control Egress Queue and
+ * for each Free List, we need an Egress Context.
+ */
+enum {
+ VFRES_NPORTS = 1, /* # of "ports" per VF */
+ VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
+
+ VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
+ VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
+ VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
+ VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
+ VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
+ VFRES_TC = 0, /* PCI-E traffic class */
+ VFRES_NEXACTF = 16, /* # of exact MPS filters */
+
+ VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
+ VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
+};
+
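Spelled out, with VFRES_NQSETS = 2 each VF is provisioned VFRES_NEQ = 2 * 2 = 4 egress contexts (one Ethernet TX queue plus one Free List per Queue Set) and VFRES_NIQFLINT = 2 + 2 = 4 interrupt-capable ingress queues (the two Queue Set response queues plus the Firmware Event and Forwarded Interrupt Queues mentioned above).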
+/*
+ * Provide a Port Access Rights Mask for the specified PF/VF. This is very
+ * static and likely not to be useful in the long run. We really need to
+ * implement some form of persistent configuration which the firmware
+ * controls.
+ */
+static unsigned int pfvfres_pmask(struct adapter *adapter,
+ unsigned int pf, unsigned int vf)
+{
+ unsigned int portn, portvec;
+
+ /*
+ * Give the PF access to all of the ports.
+ */
+ if (vf == 0)
+ return FW_PFVF_CMD_PMASK_MASK;
+
+ /*
+ * For VFs, we'll assign them access to the ports based purely on the
+ * PF. We assign active ports in order, wrapping around if there are
+ * fewer active ports than PFs: e.g. active port[pf % nports].
+ * Unfortunately the adapter's port_info structs haven't been
+ * initialized yet so we have to compute this.
+ */
+ if (adapter->params.nports == 0)
+ return 0;
+
+ portn = pf % adapter->params.nports;
+ portvec = adapter->params.portvec;
+ for (;;) {
+ /*
+ * Isolate the lowest set bit in the port vector. If we're at
+ * the port number that we want, return that as the pmask.
+ * otherwise mask that bit out of the port vector and
+ * decrement our port number ...
+ */
+ unsigned int pmask = portvec ^ (portvec & (portvec-1));
+ if (portn == 0)
+ return pmask;
+ portn--;
+ portvec &= ~pmask;
+ }
+ /*NOTREACHED*/
+}
+#endif
+
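The pmask loop above uses the classic lowest-set-bit idiom: portvec & (portvec - 1) clears the lowest set bit, so XORing the result back against portvec isolates that bit. A standalone sketch (the example port vector is made up purely for illustration):

        #include <stdio.h>

        int main(void)
        {
                unsigned int portvec = 0xb;     /* hypothetical: ports 0, 1 and 3 active */

                while (portvec) {
                        /* Isolate the lowest set bit, as pfvfres_pmask() does. */
                        unsigned int pmask = portvec ^ (portvec & (portvec - 1));

                        printf("pmask = %#x\n", pmask); /* prints 0x1, 0x2, 0x8 */
                        portvec &= ~pmask;
                }
                return 0;
        }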
enum {
MEMWIN0_APERTURE = 65536,
MEMWIN0_BASE = 0x30000,
@@ -216,7 +286,7 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
static const char *mod_str[] = {
- NULL, "LR", "SR", "ER", "passive DA", "active DA"
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
};
const struct net_device *dev = adap->port[port_id];
@@ -224,7 +294,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
netdev_info(dev, "port module unplugged\n");
- else
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
@@ -1234,7 +1304,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
unsigned int v = 0;
- if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
v |= SUPPORTED_TP;
if (caps & FW_PORT_CAP_SPEED_100M)
v |= SUPPORTED_100baseT_Full;
@@ -1250,7 +1321,10 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
v |= SUPPORTED_10000baseKX4_Full;
} else if (type == FW_PORT_TYPE_KR)
v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
- else if (type == FW_PORT_TYPE_FIBER)
+ else if (type == FW_PORT_TYPE_BP_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
+ else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
if (caps & FW_PORT_CAP_ANEG)
@@ -1276,13 +1350,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
const struct port_info *p = netdev_priv(dev);
if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
p->port_type == FW_PORT_TYPE_BT_XAUI)
cmd->port = PORT_TP;
- else if (p->port_type == FW_PORT_TYPE_FIBER)
+ else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI)
cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_TWINAX)
- cmd->port = PORT_DA;
- else
+ else if (p->port_type == FW_PORT_TYPE_SFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_FIBRE;
+ } else
cmd->port = PORT_OTHER;
if (p->mdio_addr >= 0) {
@@ -1719,14 +1799,7 @@ static int set_tso(struct net_device *dev, u32 value)
static int set_flags(struct net_device *dev, u32 flags)
{
- if (flags & ~ETH_FLAG_RXHASH)
- return -EOPNOTSUPP;
-
- if (flags & ETH_FLAG_RXHASH)
- dev->features |= NETIF_F_RXHASH;
- else
- dev->features &= ~NETIF_F_RXHASH;
- return 0;
+ return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
}
static struct ethtool_ops cxgb_ethtool_ops = {
@@ -2483,6 +2556,7 @@ static void cxgb_down(struct adapter *adapter)
t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
adapter->tid_release_task_busy = false;
+ adapter->tid_release_head = NULL;
if (adapter->flags & USING_MSIX) {
free_msix_queue_irqs(adapter);
@@ -2511,9 +2585,10 @@ static int cxgb_open(struct net_device *dev)
}
dev->real_num_tx_queues = pi->nqsets;
- link_start(dev);
- netif_tx_start_all_queues(dev);
- return 0;
+ err = link_start(dev);
+ if (!err)
+ netif_tx_start_all_queues(dev);
+ return err;
}
static int cxgb_close(struct net_device *dev)
@@ -2526,12 +2601,12 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, 0, pi->viid, false, false);
}
-static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev)
{
struct port_stats stats;
struct port_info *p = netdev_priv(dev);
struct adapter *adapter = p->adapter;
- struct net_device_stats *ns = &dev->stats;
+ struct rtnl_link_stats64 *ns = &dev->stats64;
spin_lock(&adapter->stats_lock);
t4_get_port_stats(adapter, p->tx_chan, &stats);
@@ -2674,7 +2749,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
.ndo_start_xmit = t4_eth_xmit,
- .ndo_get_stats = cxgb_get_stats,
+ .ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -2709,6 +2784,65 @@ static void setup_memwin(struct adapter *adap)
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
+static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
+{
+ u32 v;
+ int ret;
+
+ /* get device capabilities */
+ memset(c, 0, sizeof(*c));
+ c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ c->retval_len16 = htonl(FW_LEN16(*c));
+ ret = t4_wr_mbox(adap, 0, c, sizeof(*c), c);
+ if (ret < 0)
+ return ret;
+
+ /* select capabilities we'll be using */
+ if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+ if (!vf_acls)
+ c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+ else
+ c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+ } else if (vf_acls) {
+ dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+ return ret;
+ }
+ c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ ret = t4_wr_mbox(adap, 0, c, sizeof(*c), NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_config_glbl_rss(adap, 0,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_cfg_pfvf(adap, 0, 0, 0, MAX_EGRQ, 64, MAX_INGQ, 0, 0, 4,
+ 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ if (ret < 0)
+ return ret;
+
+ t4_sge_init(adap);
+
+ /* get basic stuff going */
+ ret = t4_early_init(adap, 0);
+ if (ret < 0)
+ return ret;
+
+ /* tweak some settings */
+ t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
+ t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
+ v = t4_read_reg(adap, TP_PIO_DATA);
+ t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ setup_memwin(adap);
+ return 0;
+}
+
/*
* Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
*/
@@ -2746,43 +2880,6 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
- /* get device capabilities */
- memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
- c.retval_len16 = htonl(FW_LEN16(c));
- ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
- if (ret < 0)
- goto bye;
-
- /* select capabilities we'll be using */
- if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
- if (!vf_acls)
- c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
- else
- c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
- } else if (vf_acls) {
- dev_err(adap->pdev_dev, "virtualization ACLs not supported");
- goto bye;
- }
- c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
- ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
- if (ret < 0)
- goto bye;
-
- ret = t4_config_glbl_rss(adap, 0,
- FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
- if (ret < 0)
- goto bye;
-
- ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
- FW_CMD_CAP_PF, FW_CMD_CAP_PF);
- if (ret < 0)
- goto bye;
-
for (v = 0; v < SGE_NTIMERS - 1; v++)
adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
@@ -2790,16 +2887,19 @@ static int adap_init0(struct adapter *adap)
for (v = 1; v < SGE_NCOUNTERS; v++)
adap->sge.counter_val[v] = min(intr_cnt[v - 1],
THRESHOLD_3_MASK);
- t4_sge_init(adap);
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
- /* get basic stuff going */
- ret = t4_early_init(adap, 0);
+ params[0] = FW_PARAM_DEV(CCLK);
+ ret = t4_query_params(adap, 0, 0, 0, 1, params, val);
if (ret < 0)
goto bye;
+ adap->params.vpd.cclk = val[0];
-#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+ ret = adap_init1(adap, &c);
+ if (ret < 0)
+ goto bye;
#define FW_PARAM_PFVF(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
@@ -2853,6 +2953,18 @@ static int adap_init0(struct adapter *adap)
adap->vres.rq.size = val[3] - val[2] + 1;
adap->vres.pbl.start = val[4];
adap->vres.pbl.size = val[5] - val[4] + 1;
+
+ params[0] = FW_PARAM_PFVF(SQRQ_START);
+ params[1] = FW_PARAM_PFVF(SQRQ_END);
+ params[2] = FW_PARAM_PFVF(CQ_START);
+ params[3] = FW_PARAM_PFVF(CQ_END);
+ ret = t4_query_params(adap, 0, 0, 0, 4, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.qp.start = val[0];
+ adap->vres.qp.size = val[1] - val[0] + 1;
+ adap->vres.cq.start = val[2];
+ adap->vres.cq.size = val[3] - val[2] + 1;
}
if (c.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -2877,13 +2989,41 @@ static int adap_init0(struct adapter *adap)
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
- /* tweak some settings */
- t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
- t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
- t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
- v = t4_read_reg(adap, TP_PIO_DATA);
- t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
- setup_memwin(adap);
+#ifdef CONFIG_PCI_IOV
+ /*
+ * Provision resource limits for Virtual Functions. We currently
+ * grant them all the same static resource limits except for the Port
+ * Access Rights Mask which we're assigning based on the PF. All of
+ * the static provisioning stuff for both the PF and VF really needs
+ * to be managed in a persistent manner for each device which the
+ * firmware controls.
+ */
+ {
+ int pf, vf;
+
+ for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
+ if (num_vf[pf] <= 0)
+ continue;
+
+ /* VF numbering starts at 1! */
+ for (vf = 1; vf <= num_vf[pf]; vf++) {
+ ret = t4_cfg_pfvf(adap, 0, pf, vf,
+ VFRES_NEQ, VFRES_NETHCTRL,
+ VFRES_NIQFLINT, VFRES_NIQ,
+ VFRES_TC, VFRES_NVI,
+ FW_PFVF_CMD_CMASK_MASK,
+ pfvfres_pmask(adap, pf, vf),
+ VFRES_NEXACTF,
+ VFRES_R_CAPS, VFRES_WX_CAPS);
+ if (ret < 0)
+ dev_warn(adap->pdev_dev, "failed to "
+ "provision pf/vf=%d/%d; "
+ "err=%d\n", pf, vf, ret);
+ }
+ }
+ }
+#endif
+
return 0;
/*
@@ -2896,6 +3036,108 @@ bye: if (ret != -ETIMEDOUT && ret != -EIO)
return ret;
}
+/* EEH callbacks */
+
+static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ int i;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap)
+ goto out;
+
+ rtnl_lock();
+ adap->flags &= ~FW_OK;
+ notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+
+ netif_device_detach(dev);
+ netif_carrier_off(dev);
+ }
+ if (adap->flags & FULL_INIT_DONE)
+ cxgb_down(adap);
+ rtnl_unlock();
+ pci_disable_device(pdev);
+out: return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
+{
+ int i, ret;
+ struct fw_caps_config_cmd c;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap) {
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (t4_wait_dev_ready(adap) < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+ if (t4_fw_hello(adap, 0, 0, MASTER_MUST, NULL))
+ return PCI_ERS_RESULT_DISCONNECT;
+ adap->flags |= FW_OK;
+ if (adap_init1(adap, &c))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ ret = t4_alloc_vi(adap, 0, p->tx_chan, 0, 0, 1, NULL, NULL);
+ if (ret < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+ p->viid = ret;
+ p->xact_addr_filt = -1;
+ }
+
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ if (cxgb_up(adap))
+ return PCI_ERS_RESULT_DISCONNECT;
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void eeh_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap)
+ return;
+
+ rtnl_lock();
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+
+ if (netif_running(dev)) {
+ link_start(dev);
+ cxgb_set_rxmode(dev);
+ }
+ netif_device_attach(dev);
+ }
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers cxgb4_eeh = {
+ .error_detected = eeh_err_detected,
+ .slot_reset = eeh_slot_reset,
+ .resume = eeh_resume,
+};
+
static inline bool is_10g_port(const struct link_config *lc)
{
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
@@ -3079,7 +3321,8 @@ static int __devinit enable_msix(struct adapter *adap)
static void __devinit print_port_info(struct adapter *adap)
{
static const char *base[] = {
- "R", "KX4", "T", "KX", "T", "KR", "CX4"
+ "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
+ "KX", "KR", "KR SFP+", "KR FEC"
};
int i;
@@ -3143,8 +3386,10 @@ static int __devinit init_one(struct pci_dev *pdev,
/* We control everything through PF 0 */
func = PCI_FUNC(pdev->devfn);
- if (func > 0)
+ if (func > 0) {
+ pci_save_state(pdev); /* to restore SR-IOV later */
goto sriov;
+ }
err = pci_enable_device(pdev);
if (err) {
@@ -3385,6 +3630,7 @@ static struct pci_driver cxgb4_driver = {
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = __devexit_p(remove_one),
+ .err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 5b98546..0dc0866 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -185,6 +185,8 @@ struct cxgb4_virt_res { /* virtualized HW resources */
struct cxgb4_range stag;
struct cxgb4_range rq;
struct cxgb4_range pbl;
+ struct cxgb4_range qp;
+ struct cxgb4_range cq;
};
/*
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
index 9f96724..5b990d2 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/cxgb4/l2t.c
@@ -310,6 +310,13 @@ static void t4_l2e_free(struct l2t_entry *e)
neigh_release(e->neigh);
e->neigh = NULL;
}
+ while (e->arpq_head) {
+ struct sk_buff *skb = e->arpq_head;
+
+ e->arpq_head = skb->next;
+ kfree(skb);
+ }
+ e->arpq_tail = NULL;
}
spin_unlock_bh(&e->lock);
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index d1f8f22..4388f72 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -931,7 +931,7 @@ out_free: dev_kfree_skb(skb);
ssi = skb_shinfo(skb);
if (ssi->gso_size) {
- struct cpl_tx_pkt_lso *lso = (void *)wr;
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
@@ -1718,7 +1718,7 @@ static int process_responses(struct sge_rspq *q, int budget)
free_rx_bufs(q->adap, &rxq->fl, 1);
q->offset = 0;
}
- len &= RSPD_LEN;
+ len = RSPD_LEN(len);
}
si.tot_len = len;
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index da272a9..3e63d14 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -221,6 +221,13 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
+ /*
+ * If the device is off-line, as in EEH, commands will time out.
+ * Fail them early so we don't waste time waiting.
+ */
+ if (adap->pdev->error_state != pci_channel_io_normal)
+ return -EIO;
+
v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
@@ -449,12 +456,10 @@ enum {
SF_RD_STATUS = 5, /* read status register */
SF_WR_ENABLE = 6, /* enable writes */
SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
- FW_START_SEC = 8, /* first flash sector for FW */
- FW_END_SEC = 15, /* last flash sector for FW */
- FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
- FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+ FW_MAX_SIZE = 512 * 1024,
};
/**
@@ -558,7 +563,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
{
int ret;
- if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+ if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
return -EINVAL;
addr = swab32(addr) | SF_RD_DATA_FAST;
@@ -596,7 +601,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
u32 buf[64];
unsigned int i, c, left, val, offset = addr & 0xff;
- if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
+ if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
return -EINVAL;
val = swab32(addr) | SF_PROG_PAGE;
@@ -614,7 +619,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
if (ret)
goto unlock;
}
- ret = flash_wait_op(adapter, 5, 1);
+ ret = flash_wait_op(adapter, 8, 1);
if (ret)
goto unlock;
@@ -647,9 +652,8 @@ unlock:
*/
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter,
- FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ return t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}
/**
@@ -661,8 +665,8 @@ static int get_fw_version(struct adapter *adapter, u32 *vers)
*/
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
- tp_microcode_ver),
+ return t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, tp_microcode_ver),
1, vers, 0);
}
@@ -684,9 +688,9 @@ int t4_check_fw_version(struct adapter *adapter)
if (!ret)
ret = get_tp_version(adapter, &adapter->params.tp_vers);
if (!ret)
- ret = t4_read_flash(adapter,
- FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
- 2, api_vers, 1);
+ ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, intfver_nic),
+ 2, api_vers, 1);
if (ret)
return ret;
@@ -726,7 +730,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
(ret = sf1_write(adapter, 4, 0, 1,
SF_ERASE_SECTOR | (start << 8))) != 0 ||
- (ret = flash_wait_op(adapter, 5, 500)) != 0) {
+ (ret = flash_wait_op(adapter, 14, 500)) != 0) {
dev_err(adapter->pdev_dev,
"erase of flash sector %d failed, error %d\n",
start, ret);
@@ -754,6 +758,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
u8 first_page[SF_PAGE_SIZE];
const u32 *p = (const u32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+ unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+ unsigned int fw_img_start = adap->params.sf_fw_start;
+ unsigned int fw_start_sec = fw_img_start / sf_sec_size;
if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -784,8 +791,8 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
}
- i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
- ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+ ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
if (ret)
goto out;
@@ -796,11 +803,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
- ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
- addr = FW_IMG_START;
+ addr = fw_img_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@@ -810,7 +817,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
}
ret = t4_write_flash(adap,
- FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
+ fw_img_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)
@@ -1128,6 +1135,7 @@ static void cim_intr_handler(struct adapter *adapter)
static void ulprx_intr_handler(struct adapter *adapter)
{
static struct intr_info ulprx_intr_info[] = {
+ { 0x1800000, "ULPRX context error", -1, 1 },
{ 0x7fffff, "ULPRX parity error", -1, 1 },
{ 0 }
};
@@ -2510,7 +2518,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
c.retval_len16 = htonl(FW_LEN16(c));
c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
FW_PFVF_CMD_NIQ(rxq));
- c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
+ c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
FW_PFVF_CMD_PMASK(pmask) |
FW_PFVF_CMD_NEQ(txq));
c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
@@ -2572,7 +2580,7 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
if (rss_size)
*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
- return ntohs(c.viid_pkd);
+ return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}
/**
@@ -2595,7 +2603,7 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
FW_VI_CMD_VFN(vf));
c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
- c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
+ c.type_viid = htons(FW_VI_CMD_VIID(viid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
@@ -3045,7 +3053,7 @@ static void __devinit init_link_config(struct link_config *lc,
}
}
-static int __devinit wait_dev_ready(struct adapter *adap)
+int t4_wait_dev_ready(struct adapter *adap)
{
if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
return 0;
@@ -3053,6 +3061,33 @@ static int __devinit wait_dev_ready(struct adapter *adap)
return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}
+static int __devinit get_flash_params(struct adapter *adap)
+{
+ int ret;
+ u32 info;
+
+ ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = sf1_read(adap, 3, 0, 1, &info);
+ t4_write_reg(adap, SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ adap->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ adap->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ adap->params.sf_size = 1 << info;
+ adap->params.sf_fw_start =
+ t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+ return 0;
+}
+
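To make the decode concrete, take a hypothetical 32 Mbit Numonyx part: the low byte of the ID word would be the manufacturer code 0x20 and the size field 0x16 (i.e. 22, log2 of the byte capacity), so sf_size = 1 << 22 = 4 MiB and sf_nsec = 1 << (22 - 16) = 64 sectors of 64 KiB each.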
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -3066,13 +3101,19 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
{
int ret;
- ret = wait_dev_ready(adapter);
+ ret = t4_wait_dev_ready(adapter);
if (ret < 0)
return ret;
get_pci_mode(adapter, &adapter->params.pci);
adapter->params.rev = t4_read_reg(adapter, PL_REV);
+ ret = get_flash_params(adapter);
+ if (ret < 0) {
+ dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
+ return ret;
+ }
+
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
@@ -3122,12 +3163,13 @@ int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
+ adap->port[i]->dev_id = j;
ret = ntohl(c.u.info.lstatus_to_modtype);
p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
- p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
j++;
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 0256232..e875d09 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -57,8 +57,6 @@ enum {
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
- SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
- SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
};
enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
@@ -69,6 +67,45 @@ enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+
+ SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
+ SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
+
+ SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
+
+ SGE_INTRDST_PCI = 0, /* interrupt destination is PCI-E */
+ SGE_INTRDST_IQ = 1, /* destination is an ingress queue */
+
+ SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */
+ SGE_UPDATEDEL_INTR = 1, /* interrupt */
+ SGE_UPDATEDEL_STPG = 2, /* status page */
+ SGE_UPDATEDEL_BOTH = 3, /* interrupt and status page */
+
+ SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */
+ SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */
+ SGE_HOSTFCMODE_STPG = 2, /* sent to status page */
+ SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */
+
+ SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
+ SGE_FETCHBURSTMIN_32B = 1,
+ SGE_FETCHBURSTMIN_64B = 2,
+ SGE_FETCHBURSTMIN_128B = 3,
+
+ SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
+ SGE_FETCHBURSTMAX_128B = 1,
+ SGE_FETCHBURSTMAX_256B = 2,
+ SGE_FETCHBURSTMAX_512B = 3,
+
+ SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
+ SGE_CIDXFLUSHTHRESH_2 = 1,
+ SGE_CIDXFLUSHTHRESH_4 = 2,
+ SGE_CIDXFLUSHTHRESH_8 = 3,
+ SGE_CIDXFLUSHTHRESH_16 = 4,
+ SGE_CIDXFLUSHTHRESH_32 = 5,
+ SGE_CIDXFLUSHTHRESH_64 = 6,
+ SGE_CIDXFLUSHTHRESH_128 = 7,
+
+ SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
};
struct sge_qstat { /* data written to SGE queue status entries */
@@ -90,11 +127,13 @@ struct rsp_ctrl {
};
#define RSPD_NEWBUF 0x80000000U
-#define RSPD_LEN 0x7fffffffU
+#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
+#define RSPD_QID(x) RSPD_LEN(x)
#define RSPD_GEN(x) ((x) >> 7)
#define RSPD_TYPE(x) (((x) >> 4) & 3)
#define QINTR_CNT_EN 0x1
#define QINTR_TIMER_IDX(x) ((x) << 1)
+#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
#endif /* __T4_HW_H */
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index 7a981b8..623932b 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -443,8 +443,7 @@ struct cpl_tx_pkt {
#define cpl_tx_pkt_xt cpl_tx_pkt
-struct cpl_tx_pkt_lso {
- WR_HDR;
+struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
#define LSO_TCPHDR_LEN(x) ((x) << 0)
#define LSO_IPHDR_LEN(x) ((x) << 4)
@@ -460,6 +459,12 @@ struct cpl_tx_pkt_lso {
/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
};
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ struct cpl_tx_pkt_lso_core c;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
struct cpl_iscsi_hdr {
union opcode_tid ot;
__be16 pdu_len_ddp;
@@ -623,6 +628,11 @@ struct cpl_fw6_msg {
__be64 data[4];
};
+/* cpl_fw6_msg.type values */
+enum {
+ FW6_TYPE_CMD_RPL = 0,
+};
+
enum {
ULP_TX_MEM_READ = 2,
ULP_TX_MEM_WRITE = 3,
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
index 5ed5648..bf21c14 100644
--- a/drivers/net/cxgb4/t4_regs.h
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -93,12 +93,15 @@
#define PKTSHIFT_MASK 0x00001c00U
#define PKTSHIFT_SHIFT 10
#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
+#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
#define INGPCIEBOUNDARY_MASK 0x00000380U
#define INGPCIEBOUNDARY_SHIFT 7
#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
#define INGPADBOUNDARY_MASK 0x00000070U
#define INGPADBOUNDARY_SHIFT 4
#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
+#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
+ >> INGPADBOUNDARY_SHIFT)
#define EGRPCIEBOUNDARY_MASK 0x0000000eU
#define EGRPCIEBOUNDARY_SHIFT 1
#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
@@ -326,6 +329,9 @@
#define EDC_1_BASE_ADDR 0x7980
+#define CIM_BOOT_CFG 0x7b00
+#define BOOTADDR_MASK 0xffffff00U
+
#define CIM_PF_MAILBOX_DATA 0x240
#define CIM_PF_MAILBOX_CTRL 0x280
#define MBMSGVALID 0x00000008U
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 63991d6..ca45df8 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -71,6 +71,7 @@ struct fw_wr_hdr {
#define FW_WR_ATOMIC(x) ((x) << 23)
#define FW_WR_FLUSH(x) ((x) << 22)
#define FW_WR_COMPL(x) ((x) << 21)
+#define FW_WR_IMMDLEN_MASK 0xff
#define FW_WR_IMMDLEN(x) ((x) << 0)
#define FW_WR_EQUIQ (1U << 31)
@@ -447,7 +448,9 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07,
FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
- FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A
+ FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
+ FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
+ FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
};
/*
@@ -475,7 +478,13 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+ FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15,
+ FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16,
+ FW_PARAMS_PARAM_PFVF_CQ_START = 0x17,
+ FW_PARAMS_PARAM_PFVF_CQ_END = 0x18,
FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
+ FW_PARAMS_PARAM_PFVF_VIID = 0x24,
+ FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
};
/*
@@ -512,7 +521,7 @@ struct fw_pfvf_cmd {
__be32 op_to_vfn;
__be32 retval_len16;
__be32 niqflint_niq;
- __be32 cmask_to_neq;
+ __be32 type_to_neq;
__be32 tc_to_nexactf;
__be32 r_caps_to_nethctrl;
__be16 nricq;
@@ -529,11 +538,16 @@ struct fw_pfvf_cmd {
#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_PFVF_CMD_TYPE (1 << 31)
+#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1)
+
#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
-#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf)
+#define FW_PFVF_CMD_CMASK_MASK 0xf
+#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK)
#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
-#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf)
+#define FW_PFVF_CMD_PMASK_MASK 0xf
+#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK)
#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
@@ -686,6 +700,7 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
@@ -804,16 +819,16 @@ struct fw_eq_ofld_cmd {
struct fw_vi_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
- __be16 viid_pkd;
+ __be16 type_viid;
u8 mac[6];
u8 portid_pkd;
u8 nmac;
u8 nmac0[6];
__be16 rsssize_pkd;
u8 nmac1[6];
- __be16 r7;
+ __be16 idsiiq_pkd;
u8 nmac2[6];
- __be16 r8;
+ __be16 idseiq_pkd;
u8 nmac3[6];
__be64 r9;
__be64 r10;
@@ -824,13 +839,16 @@ struct fw_vi_cmd {
#define FW_VI_CMD_ALLOC (1U << 31)
#define FW_VI_CMD_FREE (1U << 30)
#define FW_VI_CMD_VIID(x) ((x) << 0)
+#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)
#define FW_VI_CMD_PORTID(x) ((x) << 4)
+#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf)
#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
/* Special VI_MAC command index ids */
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_CLS_TCAM_NUM_ENTRIES 336
enum fw_vi_mac_smac {
FW_VI_MAC_MPS_TCAM_ENTRY,
@@ -881,6 +899,7 @@ struct fw_vi_rxmode_cmd {
};
#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
+#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff
#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
@@ -1136,6 +1155,11 @@ struct fw_port_cmd {
__be32 lstatus_to_modtype;
__be16 pcap;
__be16 acap;
+ __be16 mtu;
+ __u8 cbllen;
+ __u8 r9;
+ __be32 r10;
+ __be64 r11;
} info;
struct fw_port_ppp {
__be32 pppen_to_ncsich;
@@ -1161,6 +1185,7 @@ struct fw_port_cmd {
#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
#define FW_PORT_CMD_ACTION(x) ((x) << 16)
+#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff)
#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
@@ -1196,14 +1221,17 @@ struct fw_port_cmd {
#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
enum fw_port_type {
- FW_PORT_TYPE_FIBER,
- FW_PORT_TYPE_KX4,
+ FW_PORT_TYPE_FIBER_XFI,
+ FW_PORT_TYPE_FIBER_XAUI,
FW_PORT_TYPE_BT_SGMII,
- FW_PORT_TYPE_KX,
+ FW_PORT_TYPE_BT_XFI,
FW_PORT_TYPE_BT_XAUI,
- FW_PORT_TYPE_KR,
+ FW_PORT_TYPE_KX4,
FW_PORT_TYPE_CX4,
- FW_PORT_TYPE_TWINAX,
+ FW_PORT_TYPE_KX,
+ FW_PORT_TYPE_KR,
+ FW_PORT_TYPE_SFP,
+ FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
@@ -1213,6 +1241,9 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_LR,
FW_PORT_MOD_TYPE_SR,
FW_PORT_MOD_TYPE_ER,
+ FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
+ FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
+ FW_PORT_MOD_TYPE_LRM,
FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
};
@@ -1469,6 +1500,7 @@ struct fw_rss_glb_config_cmd {
};
#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
+#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf)
#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
@@ -1485,13 +1517,14 @@ struct fw_rss_vi_config_cmd {
} manual;
struct fw_rss_vi_config_basicvirtual {
__be32 r6;
- __be32 defaultq_to_ip4udpen;
+ __be32 defaultq_to_udpen;
#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff)
#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
-#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0)
+#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0)
__be64 r9;
__be64 r10;
} basicvirtual;
diff --git a/drivers/net/cxgb4vf/Makefile b/drivers/net/cxgb4vf/Makefile
new file mode 100644
index 0000000..d72ee26
--- /dev/null
+++ b/drivers/net/cxgb4vf/Makefile
@@ -0,0 +1,7 @@
+#
+# Chelsio T4 SR-IOV Virtual Function Driver
+#
+
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o
+
+cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
new file mode 100644
index 0000000..8ea0196
--- /dev/null
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -0,0 +1,540 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file should not be included directly. Include t4vf_common.h instead.
+ */
+
+#ifndef __CXGB4VF_ADAPTER_H__
+#define __CXGB4VF_ADAPTER_H__
+
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "../cxgb4/t4_hw.h"
+
+/*
+ * Constants of the implementation.
+ */
+enum {
+ MAX_NPORTS = 1, /* max # of "ports" */
+ MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */
+ MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,
+
+ /*
+ * MSI-X interrupt index usage.
+ */
+ MSIX_FW = 0, /* MSI-X index for firmware Q */
+ MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */
+ MSIX_EXTRAS = 1,
+ MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
+
+ /*
+ * The maximum number of Ingress and Egress Queues is determined by
+ * the maximum number of "Queue Sets" which we support plus any
+ * ancillary queues. Each "Queue Set" requires one Ingress Queue
+ * for RX Packet Ingress Event notifications and two Egress Queues for
+ * a Free List and an Ethernet TX list.
+ */
+ INGQ_EXTRAS = 2, /* firmware event queue and */
+ /* forwarded interrupts */
+ MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
+ MAX_EGRQ = MAX_ETH_QSETS*2,
+};
+
+/*
+ * Forward structure definition references.
+ */
+struct adapter;
+struct sge_eth_rxq;
+struct sge_rspq;
+
+/*
+ * Per-"port" information. This is really per-Virtual Interface information
+ * but the use of the "port" nomenclature makes it easier to go back and forth
+ * between the PF and VF drivers ...
+ */
+struct port_info {
+ struct adapter *adapter; /* our adapter */
+ struct vlan_group *vlan_grp; /* our VLAN group */
+ u16 viid; /* virtual interface ID */
+ s16 xact_addr_filt; /* index of our MAC address filter */
+ u16 rss_size; /* size of VI's RSS table slice */
+ u8 pidx; /* index into adapter port[] */
+ u8 port_id; /* physical port ID */
+ u8 rx_offload; /* CSO, etc. */
+ u8 nqsets; /* # of "Queue Sets" */
+ u8 first_qset; /* index of first "Queue Set" */
+ struct link_config link_cfg; /* physical port configuration */
+};
+
+/* port_info.rx_offload flags */
+enum {
+ RX_CSO = 1 << 0,
+};
+
+/*
+ * Scatter Gather Engine resources for the "adapter". Our ingress and egress
+ * queues are organized into "Queue Sets" with one ingress and one egress
+ * queue per Queue Set. These Queue Sets are apportionable between the "ports"
+ * (Virtual Interfaces). One extra ingress queue is used to receive
+ * asynchronous messages from the firmware. Note that the "Queue IDs" that we
+ * use here are really "Relative Queue IDs" which are returned as part of the
+ * firmware command to allocate queues. These queue IDs are relative to the
+ * absolute Queue ID base of the section of the Queue ID space allocated to
+ * the PF/VF.
+ */
+
+/*
+ * SGE free-list queue state.
+ */
+struct rx_sw_desc;
+struct sge_fl {
+ unsigned int avail; /* # of available RX buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned long alloc_failed; /* # of buffer allocation failures */
+ unsigned long large_alloc_failed;
+ unsigned long starving; /* # of times FL was found starving */
+
+ /*
+ * Write-once/infrequently fields.
+ * -------------------------------
+ */
+
+ unsigned int cntxt_id; /* SGE relative QID for the free list */
+ unsigned int abs_id; /* SGE absolute QID for the free list */
+ unsigned int size; /* capacity of free list */
+ struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */
+ __be64 *desc; /* address of HW RX descriptor ring */
+ dma_addr_t addr; /* PCI bus address of hardware ring */
+};
+
+/*
+ * An ingress packet gather list.
+ */
+struct pkt_gl {
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ void *va; /* virtual address of first byte */
+ unsigned int nfrags; /* # of fragments */
+ unsigned int tot_len; /* total length of fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
+ const struct pkt_gl *);
+
+/*
+ * State for an SGE Response Queue.
+ */
+struct sge_rspq {
+ struct napi_struct napi; /* NAPI scheduling control */
+ const __be64 *cur_desc; /* current descriptor in queue */
+ unsigned int cidx; /* consumer index */
+ u8 gen; /* current generation bit */
+ u8 next_intr_params; /* holdoff params for next interrupt */
+ int offset; /* offset into current FL buffer */
+
+ unsigned int unhandled_irqs; /* bogus interrupts */
+
+ /*
+ * Write-once/infrequently fields.
+ * -------------------------------
+ */
+
+ u8 intr_params; /* interrupt holdoff parameters */
+ u8 pktcnt_idx; /* interrupt packet threshold */
+ u8 idx; /* queue index within its group */
+ u16 cntxt_id; /* SGE rel QID for the response Q */
+ u16 abs_id; /* SGE abs QID for the response Q */
+ __be64 *desc; /* address of hardware response ring */
+ dma_addr_t phys_addr; /* PCI bus address of ring */
+ unsigned int iqe_len; /* entry size */
+ unsigned int size; /* capacity of response Q */
+ struct adapter *adapter; /* our adapter */
+ struct net_device *netdev; /* associated net device */
+ rspq_handler_t handler; /* the handler for this response Q */
+};
+
+/*
+ * Ethernet queue statistics
+ */
+struct sge_eth_stats {
+ unsigned long pkts; /* # of ethernet packets */
+ unsigned long lro_pkts; /* # of LRO super packets */
+ unsigned long lro_merged; /* # of wire packets merged by LRO */
+ unsigned long rx_cso; /* # of Rx checksum offloads */
+ unsigned long vlan_ex; /* # of Rx VLAN extractions */
+ unsigned long rx_drops; /* # of packets dropped due to no mem */
+};
+
+/*
+ * State for an Ethernet Receive Queue.
+ */
+struct sge_eth_rxq {
+ struct sge_rspq rspq; /* Response Queue */
+ struct sge_fl fl; /* Free List */
+ struct sge_eth_stats stats; /* receive statistics */
+};
+
+/*
+ * SGE Transmit Queue state. This contains all of the resources associated
+ * with the hardware status of a TX Queue which is a circular ring of hardware
+ * TX Descriptors. For convenience, it also contains a pointer to a parallel
+ * "Software Descriptor" array but we don't know anything about it here other
+ * than its type name.
+ */
+struct tx_desc {
+ /*
+ * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
+ * hardware: Sizes, Producer and Consumer indices, etc.
+ */
+ __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
+};
+struct tx_sw_desc;
+struct sge_txq {
+ unsigned int in_use; /* # of in-use TX descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int cidx; /* SW consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned long stops; /* # of times queue has been stopped */
+ unsigned long restarts; /* # of queue restarts */
+
+ /*
+ * Write-once/infrequently fields.
+ * -------------------------------
+ */
+
+ unsigned int cntxt_id; /* SGE relative QID for the TX Q */
+ unsigned int abs_id; /* SGE absolute QID for the TX Q */
+ struct tx_desc *desc; /* address of HW TX descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */
+ struct sge_qstat *stat; /* queue status entry */
+ dma_addr_t phys_addr; /* PCI bus address of hardware ring */
+};
+
+/*
+ * State for an Ethernet Transmit Queue.
+ */
+struct sge_eth_txq {
+ struct sge_txq q; /* SGE TX Queue */
+ struct netdev_queue *txq; /* associated netdev TX queue */
+ unsigned long tso; /* # of TSO requests */
+ unsigned long tx_cso; /* # of TX checksum offloads */
+ unsigned long vlan_ins; /* # of TX VLAN insertions */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+};
+
+/*
+ * The complete set of Scatter/Gather Engine resources.
+ */
+struct sge {
+ /*
+ * Our "Queue Sets" ...
+ */
+ struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+ struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+
+ /*
+ * Extra ingress queues for asynchronous firmware events and
+ * forwarded interrupts (when in MSI mode).
+ */
+ struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+
+ struct sge_rspq intrq ____cacheline_aligned_in_smp;
+ spinlock_t intrq_lock;
+
+ /*
+ * State for managing "starving Free Lists" -- Free Lists which have
+ * fallen below a certain threshold of buffers available to the
+ * hardware and attempts to refill them up to that threshold have
+ * failed. We have a regular "slow tick" timer process which will
+ * make periodic attempts to refill these starving Free Lists ...
+ */
+ DECLARE_BITMAP(starving_fl, MAX_EGRQ);
+ struct timer_list rx_timer;
+
+ /*
+ * State for cleaning up completed TX descriptors.
+ */
+ struct timer_list tx_timer;
+
+ /*
+ * Write-once/infrequently fields.
+ * -------------------------------
+ */
+
+ u16 max_ethqsets; /* # of available Ethernet queue sets */
+ u16 ethqsets; /* # of active Ethernet queue sets */
+ u16 ethtxq_rover; /* Tx queue to clean up next */
+ u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
+ u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
+
+ /*
+ * Reverse maps from Absolute Queue IDs to associated queue pointers.
+ * The absolute Queue IDs are in a compact range which start at a
+ * [potentially large] Base Queue ID. We perform the reverse map by
+ * first converting the Absolute Queue ID into a Relative Queue ID by
+ * subtracting off the Base Queue ID and then use a Relative Queue ID
+ * indexed table to get the pointer to the corresponding software
+ * queue structure.
+ */
+ unsigned int egr_base;
+ unsigned int ingr_base;
+ void *egr_map[MAX_EGRQ];
+ struct sge_rspq *ingr_map[MAX_INGQ];
+};
+
+/*
+ * Utility macros to convert Absolute Queue IDs into Relative Queue indices
+ * for Egress and Ingress Queues.  The EQ_MAP() and IQ_MAP() macros, which
+ * provide pointers to Egress and Ingress Queues, can be used as both L- and
+ * R-values.
+ */
+#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
+#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))
+
+#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
+#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
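+
+/*
+ * Example usage: setup_sge_queues() in cxgb4vf_main.c populates these maps
+ * as L-values when the queues are created, e.g.
+ *
+ *	IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
+ *	EQ_MAP(s, txq->q.abs_id) = &txq->q;
+ *
+ * while fwevtq_handler() uses EQ_IDX() and egr_map[] to map the Absolute
+ * Queue ID in a CPL_SGE_EGR_UPDATE message back to its TX queue pointer.
+ */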
+
+/*
+ * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
+ */
+#define for_each_ethrxq(sge, iter) \
+ for (iter = 0; iter < (sge)->ethqsets; iter++)
+
+/*
+ * Per-"adapter" (Virtual Function) information.
+ */
+struct adapter {
+ /* PCI resources */
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ struct device *pdev_dev;
+
+ /* "adapter" resources */
+ unsigned long registered_device_map;
+ unsigned long open_device_map;
+ unsigned long flags;
+ struct adapter_params params;
+
+ /* queue and interrupt resources */
+ struct {
+ unsigned short vec;
+ char desc[22];
+ } msix_info[MSIX_ENTRIES];
+ struct sge sge;
+
+ /* Linux network device resources */
+ struct net_device *port[MAX_NPORTS];
+ const char *name;
+ unsigned int msg_enable;
+
+ /* debugfs resources */
+ struct dentry *debugfs_root;
+
+ /* various locks */
+ spinlock_t stats_lock;
+};
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1UL << 0),
+ USING_MSI = (1UL << 1),
+ USING_MSIX = (1UL << 2),
+ QUEUES_BOUND = (1UL << 3),
+};
+
+/*
+ * The following register read/write routine definitions are required by
+ * the common code.
+ */
+
+/**
+ * t4_read_reg - read a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+ return readl(adapter->regs + reg_addr);
+}
+
+/**
+ * t4_write_reg - write a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+ writel(val, adapter->regs + reg_addr);
+}
+
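+/*
+ * Fallback 64-bit MMIO accessors for platforms which don't define native
+ * readq()/writeq().  Note that these are composed of two 32-bit accesses
+ * (low dword first, then high dword) and are therefore not atomic.
+ */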
+#ifndef readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+/**
+ * t4_read_reg64 - read a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 64-bit value of the given HW register.
+ */
+static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
+{
+ return readq(adapter->regs + reg_addr);
+}
+
+/**
+ * t4_write_reg64 - write a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 64-bit value into the given HW register.
+ */
+static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
+ u64 val)
+{
+ writeq(val, adapter->regs + reg_addr);
+}
+
+/**
+ * port_name - return the string name of a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Return the string name of the selected port.
+ */
+static inline const char *port_name(struct adapter *adapter, int pidx)
+{
+ return adapter->port[pidx]->name;
+}
+
+/**
+ * t4_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
+ u8 hw_addr[])
+{
+ memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
+ memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
+}
+
+/**
+ * netdev2pinfo - return the port_info structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct port_info associated with a net_device
+ */
+static inline struct port_info *netdev2pinfo(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @pidx: the port index
+ *
+ * Return the port_info structure for the adapter.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
+{
+ return netdev_priv(adapter->port[pidx]);
+}
+
+/**
+ * netdev2adap - return the adapter structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct adapter associated with a net_device
+ */
+static inline struct adapter *netdev2adap(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->adapter;
+}
+
+/*
+ * OS "Callback" function declarations. These are functions that the OS code
+ * is "contracted" to provide for the common code.
+ */
+void t4vf_os_link_changed(struct adapter *, int, int);
+
+/*
+ * SGE function prototype declarations.
+ */
+int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
+ struct net_device *, int,
+ struct sge_fl *, rspq_handler_t);
+int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
+ struct net_device *, struct netdev_queue *,
+ unsigned int);
+void t4vf_free_sge_resources(struct adapter *);
+
+int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
+int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
+ const struct pkt_gl *);
+
+irq_handler_t t4vf_intr_handler(struct adapter *);
+irqreturn_t t4vf_sge_intr_msix(int, void *);
+
+int t4vf_sge_init(struct adapter *);
+void t4vf_sge_start(struct adapter *);
+void t4vf_sge_stop(struct adapter *);
+
+#endif /* __CXGB4VF_ADAPTER_H__ */
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
new file mode 100644
index 0000000..bd73ff5
--- /dev/null
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -0,0 +1,2906 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Generic information about the driver.
+ */
+#define DRV_VERSION "1.0.0"
+#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
+
+/*
+ * Module Parameters.
+ * ==================
+ */
+
+/*
+ * Default ethtool "message level" for adapters.
+ */
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable,
+ "default adapter ethtool message level bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X then MSI. This parameter determines which of these schemes the
+ * driver may consider as follows:
+ *
+ * msi = 2: choose from among MSI-X and MSI
+ * msi = 1: only consider MSI interrupts
+ *
+ * Note that unlike the Physical Function driver, this Virtual Function driver
+ * does _not_ support legacy INTx interrupts (this limitation is mandated by
+ * the PCI-E SR-IOV standard).
+ */
+#define MSI_MSIX 2
+#define MSI_MSI 1
+#define MSI_DEFAULT MSI_MSIX
+
+static int msi = MSI_DEFAULT;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
+
+/*
+ * Fundamental constants.
+ * ======================
+ */
+
+enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+
+ MIN_TXQ_ENTRIES = 32,
+ MIN_RSPQ_ENTRIES = 128,
+ MIN_FL_ENTRIES = 16,
+
+ /*
+ * For purposes of manipulating the Free List size we need to
+ * recognize that Free Lists are actually Egress Queues (the host
+ * produces free buffers which the hardware consumes), Egress Queue
+ * indices are all in units of Egress Context Units (bytes), and free
+ * list entries are 64-bit PCI DMA addresses. And since the state of
+ * the Producer Index == the Consumer Index implies an EMPTY list, we
+ * always have at least one Egress Unit's worth of Free List entries
+ * unused. See sge.c for more details ...
+ */
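+ /*
+ * EQ_UNIT is the Egress Queue index unit in bytes, FL_PER_EQ_UNIT is the
+ * number of 64-bit Free List addresses which fit in one such unit, and
+ * MIN_FL_RESID is the one unit's worth of entries we always leave unused.
+ */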
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ MIN_FL_RESID = FL_PER_EQ_UNIT,
+};
+
+/*
+ * Global driver state.
+ * ====================
+ */
+
+static struct dentry *cxgb4vf_debugfs_root;
+
+/*
+ * OS "Callback" functions.
+ * ========================
+ */
+
+/*
+ * The link status has changed on the indicated "port" (Virtual Interface).
+ */
+void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
+{
+ struct net_device *dev = adapter->port[pidx];
+
+ /*
+ * If the port is disabled or the current recorded "link up"
+ * status matches the new status, just return.
+ */
+ if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
+ return;
+
+ /*
+ * Tell the OS that the link status has changed and print a short
+ * informative message on the console about the event.
+ */
+ if (link_ok) {
+ const char *s;
+ const char *fc;
+ const struct port_info *pi = netdev_priv(dev);
+
+ netif_carrier_on(dev);
+
+ switch (pi->link_cfg.speed) {
+ case SPEED_10000:
+ s = "10Gbps";
+ break;
+
+ case SPEED_1000:
+ s = "1000Mbps";
+ break;
+
+ case SPEED_100:
+ s = "100Mbps";
+ break;
+
+ default:
+ s = "unknown";
+ break;
+ }
+
+ switch (pi->link_cfg.fc) {
+ case PAUSE_RX:
+ fc = "RX";
+ break;
+
+ case PAUSE_TX:
+ fc = "TX";
+ break;
+
+ case PAUSE_RX|PAUSE_TX:
+ fc = "RX/TX";
+ break;
+
+ default:
+ fc = "no";
+ break;
+ }
+
+ printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
+ dev->name, s, fc);
+ } else {
+ netif_carrier_off(dev);
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+}
+
+/*
+ * Net device operations.
+ * ======================
+ */
+
+/*
+ * Record our new VLAN Group and enable/disable hardware VLAN Tag extraction
+ * based on whether the specified VLAN Group pointer is NULL or not.
+ */
+static void cxgb4vf_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ pi->vlan_grp = grp;
+ t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, grp != NULL, 0);
+}
+
+/*
+ * Perform the MAC and PHY actions needed to enable a "port" (Virtual
+ * Interface).
+ */
+static int link_start(struct net_device *dev)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ /*
+ * We do not set address filters and promiscuity here; the stack does
+ * that step explicitly.
+ */
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, -1,
+ true);
+ if (ret == 0) {
+ ret = t4vf_change_mac(pi->adapter, pi->viid,
+ pi->xact_addr_filt, dev->dev_addr, true);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+
+ /*
+ * We don't need to actually "start the link" itself since the
+ * firmware will do that for us when the first Virtual Interface
+ * is enabled on a port.
+ */
+ if (ret == 0)
+ ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
+ return ret;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adapter)
+{
+ int namelen = sizeof(adapter->msix_info[0].desc) - 1;
+ int pidx;
+
+ /*
+ * Firmware events.
+ */
+ snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
+ "%s-FWeventq", adapter->name);
+ adapter->msix_info[MSIX_FW].desc[namelen] = 0;
+
+ /*
+ * Ethernet queues.
+ */
+ for_each_port(adapter, pidx) {
+ struct net_device *dev = adapter->port[pidx];
+ const struct port_info *pi = netdev_priv(dev);
+ int qs, msi;
+
+ for (qs = 0, msi = MSIX_NIQFLINT;
+ qs < pi->nqsets;
+ qs++, msi++) {
+ snprintf(adapter->msix_info[msi].desc, namelen,
+ "%s-%d", dev->name, qs);
+ adapter->msix_info[msi].desc[namelen] = 0;
+ }
+ }
+}
+
+/*
+ * Request all of our MSI-X resources.
+ */
+static int request_msix_queue_irqs(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int rxq, msi, err;
+
+ /*
+ * Firmware events.
+ */
+ err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
+ 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
+ if (err)
+ return err;
+
+ /*
+ * Ethernet queues.
+ */
+ msi = MSIX_NIQFLINT;
+ for_each_ethrxq(s, rxq) {
+ err = request_irq(adapter->msix_info[msi].vec,
+ t4vf_sge_intr_msix, 0,
+ adapter->msix_info[msi].desc,
+ &s->ethrxq[rxq].rspq);
+ if (err)
+ goto err_free_irqs;
+ msi++;
+ }
+ return 0;
+
+err_free_irqs:
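+ /* Unwind the Ethernet queue IRQs acquired so far, then the FW event IRQ. */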
+ while (--rxq >= 0)
+ free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
+ free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
+ return err;
+}
+
+/*
+ * Free our MSI-X resources.
+ */
+static void free_msix_queue_irqs(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int rxq, msi;
+
+ free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
+ msi = MSIX_NIQFLINT;
+ for_each_ethrxq(s, rxq)
+ free_irq(adapter->msix_info[msi++].vec,
+ &s->ethrxq[rxq].rspq);
+}
+
+/*
+ * Turn on NAPI and start up interrupts on a response queue.
+ */
+static void qenable(struct sge_rspq *rspq)
+{
+ napi_enable(&rspq->napi);
+
+ /*
+ * 0-increment the Going To Sleep register to start the timer and
+ * enable interrupts.
+ */
+ t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(0) |
+ SEINTARM(rspq->intr_params) |
+ INGRESSQID(rspq->cntxt_id));
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Receive Queues.
+ */
+static void enable_rx(struct adapter *adapter)
+{
+ int rxq;
+ struct sge *s = &adapter->sge;
+
+ for_each_ethrxq(s, rxq)
+ qenable(&s->ethrxq[rxq].rspq);
+ qenable(&s->fw_evtq);
+
+ /*
+ * The interrupt queue doesn't use NAPI so we do the 0-increment of
+ * its Going To Sleep register here to get it started.
+ */
+ if (adapter->flags & USING_MSI)
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(0) |
+ SEINTARM(s->intrq.intr_params) |
+ INGRESSQID(s->intrq.cntxt_id));
+
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int rxq;
+
+ for_each_ethrxq(s, rxq)
+ napi_disable(&s->ethrxq[rxq].rspq.napi);
+ napi_disable(&s->fw_evtq.napi);
+}
+
+/*
+ * Response queue handler for the firmware event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ /*
+ * Extract response opcode and get pointer to CPL message body.
+ */
+ struct adapter *adapter = rspq->adapter;
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+ void *cpl = (void *)(rsp + 1);
+
+ switch (opcode) {
+ case CPL_FW6_MSG: {
+ /*
+ * We've received an asynchronous message from the firmware.
+ */
+ const struct cpl_fw6_msg *fw_msg = cpl;
+ if (fw_msg->type == FW6_TYPE_CMD_RPL)
+ t4vf_handle_fw_rpl(adapter, fw_msg->data);
+ break;
+ }
+
+ case CPL_SGE_EGR_UPDATE: {
+ /*
+ * We've received an Egress Queue status update message.
+ * We get these, as the SGE is currently configured, when
+ * the firmware passes certain points in processing our
+ * TX Ethernet Queue. We use these updates to determine
+ * when we may need to restart a TX Ethernet Queue which
+ * was stopped for lack of free slots ...
+ */
+ const struct cpl_sge_egr_update *p = (void *)cpl;
+ unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+ struct sge *s = &adapter->sge;
+ struct sge_txq *tq;
+ struct sge_eth_txq *txq;
+ unsigned int eq_idx;
+ int hw_cidx, reclaimable, in_use;
+
+ /*
+ * Perform sanity checking on the Queue ID to make sure it
+ * really refers to one of our TX Ethernet Egress Queues which
+ * is active and matches the queue's ID. None of these error
+ * conditions should ever happen so we may want to either make
+ * them fatal and/or conditionalized under DEBUG.
+ */
+ eq_idx = EQ_IDX(s, qid);
+ if (unlikely(eq_idx >= MAX_EGRQ)) {
+ dev_err(adapter->pdev_dev,
+ "Egress Update QID %d out of range\n", qid);
+ break;
+ }
+ tq = s->egr_map[eq_idx];
+ if (unlikely(tq == NULL)) {
+ dev_err(adapter->pdev_dev,
+ "Egress Update QID %d TXQ=NULL\n", qid);
+ break;
+ }
+ txq = container_of(tq, struct sge_eth_txq, q);
+ if (unlikely(tq->abs_id != qid)) {
+ dev_err(adapter->pdev_dev,
+ "Egress Update QID %d refers to TXQ %d\n",
+ qid, tq->abs_id);
+ break;
+ }
+
+ /*
+ * Skip TX Queues which aren't stopped.
+ */
+ if (likely(!netif_tx_queue_stopped(txq->txq)))
+ break;
+
+ /*
+ * Skip stopped TX Queues which have more than half of their
+ * DMA rings occupied with unacknowledged writes.
+ */
+ hw_cidx = be16_to_cpu(txq->q.stat->cidx);
+ reclaimable = hw_cidx - txq->q.cidx;
+ if (reclaimable < 0)
+ reclaimable += txq->q.size;
+ in_use = txq->q.in_use - reclaimable;
+ if (in_use >= txq->q.size/2)
+ break;
+
+ /*
+ * Restart a stopped TX Queue which has less than half of its
+ * TX ring in use ...
+ */
+ txq->q.restarts++;
+ netif_tx_wake_queue(txq->txq);
+ break;
+ }
+
+ default:
+ dev_err(adapter->pdev_dev,
+ "unexpected CPL %#x on FW event queue\n", opcode);
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues
+ * to use and initializes them. We support multiple "Queue Sets" per port if
+ * we have MSI-X, otherwise just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err, pidx, msix;
+
+ /*
+ * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
+ * state.
+ */
+ bitmap_zero(s->starving_fl, MAX_EGRQ);
+
+ /*
+ * If we're using MSI interrupt mode we need to set up a "forwarded
+ * interrupt" queue which we'll set up with our MSI vector. The rest
+ * of the ingress queues will be set up to forward their interrupts to
+ * this queue ... This must be first since t4vf_sge_alloc_rxq() uses
+ * the intrq's queue ID as the interrupt forwarding queue for the
+ * subsequent calls ...
+ */
+ if (adapter->flags & USING_MSI) {
+ err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
+ adapter->port[0], 0, NULL, NULL);
+ if (err)
+ goto err_free_queues;
+ }
+
+ /*
+ * Allocate our ingress queue for asynchronous firmware messages.
+ */
+ err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
+ MSIX_FW, NULL, fwevtq_handler);
+ if (err)
+ goto err_free_queues;
+
+ /*
+ * Allocate each "port"'s initial Queue Sets. These can be changed
+ * later on ... up to the point where any interface on the adapter is
+ * brought up at which point lots of things get nailed down
+ * permanently ...
+ */
+ msix = MSIX_NIQFLINT;
+ for_each_port(adapter, pidx) {
+ struct net_device *dev = adapter->port[pidx];
+ struct port_info *pi = netdev_priv(dev);
+ struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
+ struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
+ int nqsets = (adapter->flags & USING_MSIX) ? pi->nqsets : 1;
+ int qs;
+
+ for (qs = 0; qs < nqsets; qs++, rxq++, txq++) {
+ err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
+ dev, msix++,
+ &rxq->fl, t4vf_ethrx_handler);
+ if (err)
+ goto err_free_queues;
+
+ err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
+ netdev_get_tx_queue(dev, qs),
+ s->fw_evtq.cntxt_id);
+ if (err)
+ goto err_free_queues;
+
+ rxq->rspq.idx = qs;
+ memset(&rxq->stats, 0, sizeof(rxq->stats));
+ }
+ }
+
+ /*
+ * Create the reverse mappings for the queues.
+ */
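+ /*
+ * The Absolute-to-Relative base offset for each queue type falls out of
+ * its first queue: base = Absolute Queue ID - Relative Queue ID (cntxt_id).
+ */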
+ s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
+ s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
+ IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
+ for_each_port(adapter, pidx) {
+ struct net_device *dev = adapter->port[pidx];
+ struct port_info *pi = netdev_priv(dev);
+ struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
+ struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
+ int nqsets = (adapter->flags & USING_MSIX) ? pi->nqsets : 1;
+ int qs;
+
+ for (qs = 0; qs < nqsets; qs++, rxq++, txq++) {
+ IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
+ EQ_MAP(s, txq->q.abs_id) = &txq->q;
+
+ /*
+ * The FW_IQ_CMD doesn't return the Absolute Queue IDs
+ * for Free Lists but since all of the Egress Queues
+ * (including Free Lists) have Relative Queue IDs
+ * which are computed as Absolute - Base Queue ID, we
+ * can synthesize the Absolute Queue IDs for the Free
+ * Lists. This is useful for debugging purposes when
+ * we want to dump Queue Contexts via the PF Driver.
+ */
+ rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
+ EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
+ }
+ }
+ return 0;
+
+err_free_queues:
+ t4vf_free_sge_resources(adapter);
+ return err;
+}
+
+/*
+ * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
+ * queues. We configure the RSS CPU lookup table to distribute to the number
+ * of HW receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each "port" (Virtual
+ * Interface). We always configure the RSS mapping for all ports since the
+ * mapping table has plenty of entries.
+ */
+static int setup_rss(struct adapter *adapter)
+{
+ int pidx;
+
+ for_each_port(adapter, pidx) {
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
+ u16 rss[MAX_PORT_QSETS];
+ int qs, err;
+
+ for (qs = 0; qs < pi->nqsets; qs++)
+ rss[qs] = rxq[qs].rspq.abs_id;
+
+ err = t4vf_config_rss_range(adapter, pi->viid,
+ 0, pi->rss_size, rss, pi->nqsets);
+ if (err)
+ return err;
+
+ /*
+ * Perform Global RSS Mode-specific initialization.
+ */
+ switch (adapter->params.rss.mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
+ /*
+ * If Tunnel All Lookup isn't specified in the global
+ * RSS Configuration, then we need to specify a
+ * default Ingress Queue for any ingress packets which
+ * aren't hashed. We'll use our first ingress queue
+ * ...
+ */
+ if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
+ union rss_vi_config config;
+ err = t4vf_read_rss_vi_config(adapter,
+ pi->viid,
+ &config);
+ if (err)
+ return err;
+ config.basicvirtual.defaultq =
+ rxq[0].rspq.abs_id;
+ err = t4vf_write_rss_vi_config(adapter,
+ pi->viid,
+ &config);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Bring the adapter up. Called whenever we go from no "ports" open to having
+ * one open. This function performs the actions necessary to make an adapter
+ * operational, such as completing the initialization of HW modules, and
+ * enabling interrupts. Must be called with the rtnl lock held. (Note that
+ * this is called "cxgb_up" in the PF Driver.)
+ */
+static int adapter_up(struct adapter *adapter)
+{
+ int err;
+
+ /*
+ * If this is the first time we've been called, perform basic
+ * adapter setup. Once we've done this, many of our adapter
+ * parameters can no longer be changed ...
+ */
+ if ((adapter->flags & FULL_INIT_DONE) == 0) {
+ err = setup_sge_queues(adapter);
+ if (err)
+ return err;
+ err = setup_rss(adapter);
+ if (err) {
+ t4vf_free_sge_resources(adapter);
+ return err;
+ }
+
+ if (adapter->flags & USING_MSIX)
+ name_msix_vecs(adapter);
+ adapter->flags |= FULL_INIT_DONE;
+ }
+
+ /*
+ * Acquire our interrupt resources. We only support MSI-X and MSI.
+ */
+ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+ if (adapter->flags & USING_MSIX)
+ err = request_msix_queue_irqs(adapter);
+ else
+ err = request_irq(adapter->pdev->irq,
+ t4vf_intr_handler(adapter), 0,
+ adapter->name, adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * Enable NAPI ingress processing and return success.
+ */
+ enable_rx(adapter);
+ t4vf_sge_start(adapter);
+ return 0;
+}
+
+/*
+ * Bring the adapter down. Called whenever the last "port" (Virtual
+ * Interface) closed. (Note that this routine is called "cxgb_down" in the PF
+ * Driver.)
+ */
+static void adapter_down(struct adapter *adapter)
+{
+ /*
+ * Free interrupt resources.
+ */
+ if (adapter->flags & USING_MSIX)
+ free_msix_queue_irqs(adapter);
+ else
+ free_irq(adapter->pdev->irq, adapter);
+
+ /*
+ * Wait for NAPI handlers to finish.
+ */
+ quiesce_rx(adapter);
+}
+
+/*
+ * Start up a net device.
+ */
+static int cxgb4vf_open(struct net_device *dev)
+{
+ int err;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ /*
+ * If this is the first interface that we're opening on the "adapter",
+ * bring the "adapter" up now.
+ */
+ if (adapter->open_device_map == 0) {
+ err = adapter_up(adapter);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Note that this interface is up and start everything up ...
+ */
+ dev->real_num_tx_queues = pi->nqsets;
+ set_bit(pi->port_id, &adapter->open_device_map);
+ link_start(dev);
+ netif_tx_start_all_queues(dev);
+ return 0;
+}
+
+/*
+ * Shut down a net device. This routine is called "cxgb_close" in the PF
+ * Driver ...
+ */
+static int cxgb4vf_stop(struct net_device *dev)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+ pi->link_cfg.link_ok = 0;
+
+ clear_bit(pi->port_id, &adapter->open_device_map);
+ if (adapter->open_device_map == 0)
+ adapter_down(adapter);
+ return 0;
+}
+
+/*
+ * Translate our basic statistics into the standard "ifconfig" statistics.
+ */
+static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
+{
+ struct t4vf_port_stats stats;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ struct net_device_stats *ns = &dev->stats;
+ int err;
+
+ spin_lock(&adapter->stats_lock);
+ err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
+ spin_unlock(&adapter->stats_lock);
+
+ memset(ns, 0, sizeof(*ns));
+ if (err)
+ return ns;
+
+ ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
+ stats.tx_ucast_bytes + stats.tx_offload_bytes);
+ ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
+ stats.tx_ucast_frames + stats.tx_offload_frames);
+ ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
+ stats.rx_ucast_bytes);
+ ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
+ stats.rx_ucast_frames);
+ ns->multicast = stats.rx_mcast_frames;
+ ns->tx_errors = stats.tx_drop_frames;
+ ns->rx_errors = stats.rx_err_frames;
+
+ return ns;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
+ * array of address pointers and return the number collected.
+ */
+static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int maxaddrs)
+{
+ unsigned int naddr = 0;
+ const struct netdev_hw_addr *ha;
+
+ for_each_dev_addr(dev, ha) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
+ return naddr;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
+ * array of address pointers and return the number collected.
+ */
+static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int maxaddrs)
+{
+ unsigned int naddr = 0;
+ const struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
+ return naddr;
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+ u64 mhash = 0;
+ u64 uhash = 0;
+ bool free = true;
+ u16 filt_idx[7];
+ const u8 *addr[7];
+ int ret, naddr = 0;
+ const struct port_info *pi = netdev_priv(dev);
+
+ /* first do the secondary unicast addresses */
+ naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
+ if (naddr > 0) {
+ ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+ naddr, addr, filt_idx, &uhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ }
+
+ /* next set up the multicast addresses */
+ naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
+ if (naddr > 0) {
+ ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+ naddr, addr, filt_idx, &mhash, sleep);
+ if (ret < 0)
+ return ret;
+ }
+
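+ /* Program the address hash filter with the combined uc/mc hash vectors. */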
+ return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
+ uhash | mhash, sleep);
+}
+
+/*
+ * Set RX properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ ret = set_addr_filters(dev, sleep_ok);
+ if (ret == 0)
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
+ (dev->flags & IFF_PROMISC) != 0,
+ (dev->flags & IFF_ALLMULTI) != 0,
+ 1, -1, sleep_ok);
+ return ret;
+}
+
+/*
+ * Set the current receive modes on the device.
+ */
+static void cxgb4vf_set_rxmode(struct net_device *dev)
+{
+ /* unfortunately we can't return errors to the stack */
+ set_rxmode(dev, -1, false);
+}
+
+/*
+ * Find the entry in the interrupt holdoff timer value array which comes
+ * closest to the specified interrupt holdoff value.
+ */
+static int closest_timer(const struct sge *s, int us)
+{
+ int i, timer_idx = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ int delta = us - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ timer_idx = i;
+ }
+ }
+ return timer_idx;
+}
+
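+/*
+ * Find the entry in the interrupt holdoff packet count array which comes
+ * closest to the specified packet count threshold.
+ */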
+static int closest_thres(const struct sge *s, int thres)
+{
+ int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = thres - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ pktcnt_idx = i;
+ }
+ }
+ return pktcnt_idx;
+}
+
+/*
+ * Return a queue's interrupt hold-off time in us. 0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adapter,
+ const struct sge_rspq *rspq)
+{
+ unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+
+ return timer_idx < SGE_NTIMERS
+ ? adapter->sge.timer_val[timer_idx]
+ : 0;
+}
+
+/**
+ * set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ * @adapter: the adapter
+ * @rspq: the RX response queue
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Sets an RX response queue's interrupt hold-off time and packet count.
+ * At least one of the two needs to be enabled for the queue to generate
+ * interrupts.
+ */
+static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
+ unsigned int us, unsigned int cnt)
+{
+ unsigned int timer_idx;
+
+ /*
+ * If both the interrupt holdoff timer and count are specified as
+ * zero, default to a holdoff count of 1 ...
+ */
+ if ((us | cnt) == 0)
+ cnt = 1;
+
+ /*
+ * If an interrupt holdoff count has been specified, then find the
+ * closest configured holdoff count and use that. If the response
+ * queue has already been created, then update its queue context
+ * parameters ...
+ */
+ if (cnt) {
+ int err;
+ u32 v, pktcnt_idx;
+
+ pktcnt_idx = closest_thres(&adapter->sge, cnt);
+ if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
+ v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X(
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
+ err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
+ if (err)
+ return err;
+ }
+ rspq->pktcnt_idx = pktcnt_idx;
+ }
+
+ /*
+ * Compute the closest holdoff timer index from the supplied holdoff
+ * timer value.
+ */
+ timer_idx = (us == 0
+ ? SGE_TIMER_RSTRT_CNTR
+ : closest_timer(&adapter->sge, us));
+
+ /*
+ * Update the response queue's interrupt coalescing parameters and
+ * return success.
+ */
+ rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
+ (cnt > 0 ? QINTR_CNT_EN : 0));
+ return 0;
+}
+
+/*
+ * Return a version number to identify the type of adapter. The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *adapter)
+{
+ /*
+ * Chip version 4, revision 0x3f (cxgb4vf).
+ */
+ return 4 | (0x3f << 10);
+}
+
+/*
+ * Execute the specified ioctl command.
+ */
+static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ /*
+ * The VF Driver doesn't have access to any of the other
+ * common Ethernet device ioctl()'s (like reading/writing
+ * PHY registers, etc.).
+ */
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Change the device's MTU.
+ */
+static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* accommodate SACK */
+ if (new_mtu < 81)
+ return -EINVAL;
+
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
+ -1, -1, -1, -1, true);
+ if (!ret)
+ dev->mtu = new_mtu;
+ return ret;
+}
+
+/*
+ * Change the device's MAC address.
+ */
+static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
+{
+ int ret;
+ struct sockaddr *addr = _addr;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
+ addr->sa_data, true);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
+/*
+ * Return a TX Queue on which to send the specified skb.
+ */
+static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ /*
+ * XXX For now just use the default hash but we probably want to
+ * XXX look at other possibilities ...
+ */
+ return skb_tx_hash(dev, skb);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Poll all of our receive queues. This is called outside of normal interrupt
+ * context.
+ */
+static void cxgb4vf_poll_controller(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (adapter->flags & USING_MSIX) {
+ struct sge_eth_rxq *rxq;
+ int nqsets;
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ for (nqsets = pi->nqsets; nqsets; nqsets--) {
+ t4vf_sge_intr_msix(0, &rxq->rspq);
+ rxq++;
+ }
+ } else
+ t4vf_intr_handler(adapter)(0, adapter);
+}
+#endif
+
+/*
+ * Ethtool operations.
+ * ===================
+ *
+ * Note that we don't support any ethtool operations which change the physical
+ * state of the port to which we're linked.
+ */
+
+/*
+ * Return current port link settings.
+ */
+static int cxgb4vf_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ cmd->supported = pi->link_cfg.supported;
+ cmd->advertising = pi->link_cfg.advertising;
+ cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1;
+ cmd->duplex = DUPLEX_FULL;
+
+ cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->phy_address = pi->port_id;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = pi->link_cfg.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+/*
+ * Return our driver information.
+ */
+static void cxgb4vf_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ strcpy(drvinfo->driver, KBUILD_MODNAME);
+ strcpy(drvinfo->version, DRV_VERSION);
+ strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u.%u.%u, TP %u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
+}
+
+/*
+ * Return current adapter message level.
+ */
+static u32 cxgb4vf_get_msglevel(struct net_device *dev)
+{
+ return netdev2adap(dev)->msg_enable;
+}
+
+/*
+ * Set current adapter message level.
+ */
+static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
+{
+ netdev2adap(dev)->msg_enable = msglevel;
+}
+
+/*
+ * Return the device's current Queue Set ring size parameters along with the
+ * allowed maximum values. Since ethtool doesn't understand the concept of
+ * multi-queue devices, we just return the current values associated with the
+ * first Queue Set.
+ */
+static void cxgb4vf_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *rp)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct sge *s = &pi->adapter->sge;
+
+ rp->rx_max_pending = MAX_RX_BUFFERS;
+ rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+ rp->rx_jumbo_max_pending = 0;
+ rp->tx_max_pending = MAX_TXQ_ENTRIES;
+
+ rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
+ rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+ rp->rx_jumbo_pending = 0;
+ rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+/*
+ * Set the Queue Set ring size parameters for the device. Again, since
+ * ethtool doesn't allow for the concept of multiple queues per device, we'll
+ * apply these new values across all of the Queue Sets associated with the
+ * device -- after vetting them of course!
+ */
+static int cxgb4vf_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *rp)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ int qs;
+
+ if (rp->rx_pending > MAX_RX_BUFFERS ||
+ rp->rx_jumbo_pending ||
+ rp->tx_pending > MAX_TXQ_ENTRIES ||
+ rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+ rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+ rp->rx_pending < MIN_FL_ENTRIES ||
+ rp->tx_pending < MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
+ s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
+ s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
+ s->ethtxq[qs].q.size = rp->tx_pending;
+ }
+ return 0;
+}
+
+/*
+ * Return the interrupt holdoff timer and count for the first Queue Set on the
+ * device. Our extension ioctl() (the cxgbtool interface) allows the
+ * interrupt holdoff timer to be read on all of the device's Queue Sets.
+ */
+static int cxgb4vf_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adapter = pi->adapter;
+ const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
+
+ coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
+ coalesce->rx_max_coalesced_frames =
+ ((rspq->intr_params & QINTR_CNT_EN)
+ ? adapter->sge.counter_val[rspq->pktcnt_idx]
+ : 0);
+ return 0;
+}
+
+/*
+ * Set the RX interrupt holdoff timer and count for the first Queue Set on the
+ * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
+ * the interrupt holdoff timer on any of the device's Queue Sets.
+ */
+static int cxgb4vf_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ return set_rxq_intr_params(adapter,
+ &adapter->sge.ethrxq[pi->first_qset].rspq,
+ coalesce->rx_coalesce_usecs,
+ coalesce->rx_max_coalesced_frames);
+}
+
+/*
+ * Report current port link pause parameter settings.
+ */
+static void cxgb4vf_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+ pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
+ pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+/*
+ * Return whether RX Checksum Offloading is currently enabled for the device.
+ */
+static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ return (pi->rx_offload & RX_CSO) != 0;
+}
+
+/*
+ * Turn RX Checksum Offloading on or off for the device.
+ */
+static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ if (csum)
+ pi->rx_offload |= RX_CSO;
+ else
+ pi->rx_offload &= ~RX_CSO;
+ return 0;
+}
+
+/*
+ * Identify the port by blinking the port's LED.
+ */
+static int cxgb4vf_phys_id(struct net_device *dev, u32 id)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ return t4vf_identify_port(pi->adapter, pi->viid, 5);
+}
+
+/*
+ * Port stats maintained per queue of the port.
+ */
+struct queue_port_stats {
+ u64 tso;
+ u64 tx_csum;
+ u64 rx_csum;
+ u64 vlan_ex;
+ u64 vlan_ins;
+};
+
+/*
+ * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
+ * these need to match the order of statistics returned by
+ * t4vf_get_port_stats().
+ */
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+ /*
+ * These must match the layout of the t4vf_port_stats structure.
+ */
+ "TxBroadcastBytes ",
+ "TxBroadcastFrames ",
+ "TxMulticastBytes ",
+ "TxMulticastFrames ",
+ "TxUnicastBytes ",
+ "TxUnicastFrames ",
+ "TxDroppedFrames ",
+ "TxOffloadBytes ",
+ "TxOffloadFrames ",
+ "RxBroadcastBytes ",
+ "RxBroadcastFrames ",
+ "RxMulticastBytes ",
+ "RxMulticastFrames ",
+ "RxUnicastBytes ",
+ "RxUnicastFrames ",
+ "RxErrorFrames ",
+
+ /*
+ * These are accumulated per-queue statistics and must match the
+ * order of the fields in the queue_port_stats structure.
+ */
+ "TSO ",
+ "TxCsumOffload ",
+ "RxCsumGood ",
+ "VLANextractions ",
+ "VLANinsertions ",
+};
+
+/*
+ * Return the number of statistics in the specified statistics set.
+ */
+static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+ /*NOTREACHED*/
+}
+
+/*
+ * Return the strings for the specified statistics set.
+ */
+static void cxgb4vf_get_strings(struct net_device *dev,
+ u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, stats_strings, sizeof(stats_strings));
+ break;
+ }
+}
+
+/*
+ * Small utility routine to accumulate queue statistics across the queues of
+ * a "port".
+ */
+static void collect_sge_port_stats(const struct adapter *adapter,
+ const struct port_info *pi,
+ struct queue_port_stats *stats)
+{
+ const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
+ const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
+ int qs;
+
+ memset(stats, 0, sizeof(*stats));
+ for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
+ stats->tso += txq->tso;
+ stats->tx_csum += txq->tx_cso;
+ stats->rx_csum += rxq->stats.rx_cso;
+ stats->vlan_ex += rxq->stats.vlan_ex;
+ stats->vlan_ins += txq->vlan_ins;
+ }
+}
+
+/*
+ * Return the ETH_SS_STATS statistics set.
+ */
+static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ int err = t4vf_get_port_stats(adapter, pi->pidx,
+ (struct t4vf_port_stats *)data);
+ if (err)
+ memset(data, 0, sizeof(struct t4vf_port_stats));
+
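+ /*
+ * The port stats fill the first entries of the ethtool stats array;
+ * advance past them before accumulating the per-queue stats.
+ */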
+ data += sizeof(struct t4vf_port_stats) / sizeof(u64);
+ collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+}
+
+/*
+ * Return the size of our register map.
+ */
+static int cxgb4vf_get_regs_len(struct net_device *dev)
+{
+ return T4VF_REGMAP_SIZE;
+}
+
+/*
+ * Dump a block of registers, start to end inclusive, into a buffer.
+ */
+static void reg_block_dump(struct adapter *adapter, void *regbuf,
+ unsigned int start, unsigned int end)
+{
+ u32 *bp = regbuf + start - T4VF_REGMAP_START;
+
+ for ( ; start <= end; start += sizeof(u32)) {
+ /*
+ * Avoid reading the Mailbox Control register since that
+ * can trigger a Mailbox Ownership Arbitration cycle and
+ * interfere with communication with the firmware.
+ */
+ if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
+ *bp++ = 0xffff;
+ else
+ *bp++ = t4_read_reg(adapter, start);
+ }
+}
+
+/*
+ * Copy our entire register map into the provided buffer.
+ */
+static void cxgb4vf_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs,
+ void *regbuf)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ regs->version = mk_adap_vers(adapter);
+
+ /*
+ * Fill in register buffer with our register map.
+ */
+ memset(regbuf, 0, T4VF_REGMAP_SIZE);
+
+ reg_block_dump(adapter, regbuf,
+ T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
+ T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
+ reg_block_dump(adapter, regbuf,
+ T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
+ T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
+ reg_block_dump(adapter, regbuf,
+ T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
+ T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
+ reg_block_dump(adapter, regbuf,
+ T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
+ T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
+
+ reg_block_dump(adapter, regbuf,
+ T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
+ T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
+}
+
+/*
+ * Report current Wake On LAN settings.
+ */
+static void cxgb4vf_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/*
+ * Set TCP Segmentation Offloading feature capabilities.
+ */
+static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
+{
+ if (tso)
+ dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ return 0;
+}
+
+static struct ethtool_ops cxgb4vf_ethtool_ops = {
+ .get_settings = cxgb4vf_get_settings,
+ .get_drvinfo = cxgb4vf_get_drvinfo,
+ .get_msglevel = cxgb4vf_get_msglevel,
+ .set_msglevel = cxgb4vf_set_msglevel,
+ .get_ringparam = cxgb4vf_get_ringparam,
+ .set_ringparam = cxgb4vf_set_ringparam,
+ .get_coalesce = cxgb4vf_get_coalesce,
+ .set_coalesce = cxgb4vf_set_coalesce,
+ .get_pauseparam = cxgb4vf_get_pauseparam,
+ .get_rx_csum = cxgb4vf_get_rx_csum,
+ .set_rx_csum = cxgb4vf_set_rx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .set_sg = ethtool_op_set_sg,
+ .get_link = ethtool_op_get_link,
+ .get_strings = cxgb4vf_get_strings,
+ .phys_id = cxgb4vf_phys_id,
+ .get_sset_count = cxgb4vf_get_sset_count,
+ .get_ethtool_stats = cxgb4vf_get_ethtool_stats,
+ .get_regs_len = cxgb4vf_get_regs_len,
+ .get_regs = cxgb4vf_get_regs,
+ .get_wol = cxgb4vf_get_wol,
+ .set_tso = cxgb4vf_set_tso,
+};
+
+/*
+ * /sys/kernel/debug/cxgb4vf support code and data.
+ * ================================================
+ */
+
+/*
+ * Show SGE Queue Set information. We display QPL Queue Sets per line.
+ */
+#define QPL 4
+
+static int sge_qinfo_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
+ int qs, r = (uintptr_t)v - 1;
+
+ if (r)
+ seq_putc(seq, '\n');
+
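+ /*
+ * Helper macros for emitting one row of the table: S3() prints a row
+ * label followed by one column per Queue Set in the current group of
+ * (up to) QPL Queue Sets; S(), T() and R() are shorthands for string
+ * values and per-TX-queue / per-RX-queue fields respectively.
+ */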
+ #define S3(fmt_spec, s, v) \
+ do {\
+ seq_printf(seq, "%-12s", s); \
+ for (qs = 0; qs < n; ++qs) \
+ seq_printf(seq, " %16" fmt_spec, v); \
+ seq_putc(seq, '\n'); \
+ } while (0)
+ #define S(s, v) S3("s", s, v)
+ #define T(s, v) S3("u", s, txq[qs].v)
+ #define R(s, v) S3("u", s, rxq[qs].v)
+
+ if (r < eth_entries) {
+ const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+ const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+ int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+ S("QType:", "Ethernet");
+ S("Interface:",
+ (rxq[qs].rspq.netdev
+ ? rxq[qs].rspq.netdev->name
+ : "N/A"));
+ S3("d", "Port:",
+ (rxq[qs].rspq.netdev
+ ? ((struct port_info *)
+ netdev_priv(rxq[qs].rspq.netdev))->port_id
+ : -1));
+ T("TxQ ID:", q.abs_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ PIdx:", q.pidx);
+ T("TxQ CIdx:", q.cidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
+ S3("u", "Intr pktcnt:",
+ adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
+ R("RspQ CIdx:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ R("FL ID:", fl.abs_id);
+ R("FL size:", fl.size - MIN_FL_RESID);
+ R("FL avail:", fl.avail);
+ R("FL PIdx:", fl.pidx);
+ R("FL CIdx:", fl.cidx);
+ return 0;
+ }
+
+ r -= eth_entries;
+ if (r == 0) {
+ const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+ seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
+ seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
+ seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+ qtimer_val(adapter, evtq));
+ seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+ adapter->sge.counter_val[evtq->pktcnt_idx]);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
+ } else if (r == 1) {
+ const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+ seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
+ seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
+ seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+ qtimer_val(adapter, intrq));
+ seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+ adapter->sge.counter_val[intrq->pktcnt_idx]);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
+ }
+
+ #undef R
+ #undef T
+ #undef S
+ #undef S3
+
+ return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file". We group the multi-Queue
+ * sections with QPL Queue Sets per "entry". The sections of the output are:
+ *
+ * Ethernet RX/TX Queue Sets
+ * Firmware Event Queue
+ * Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_queue_entries(const struct adapter *adapter)
+{
+ return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+ ((adapter->flags & USING_MSI) != 0);
+}
+
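+/*
+ * seq_file iterator callbacks. Positions are handed back as the value
+ * (pos + 1) cast to a pointer, since returning NULL would terminate the
+ * iteration; sge_qinfo_show() undoes this with its "(uintptr_t)v - 1".
+ */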
+static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
+{
+ int entries = sge_queue_entries(seq->private);
+
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_queue_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ int entries = sge_queue_entries(seq->private);
+
+ ++*pos;
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qinfo_seq_ops = {
+ .start = sge_queue_start,
+ .next = sge_queue_next,
+ .stop = sge_queue_stop,
+ .show = sge_qinfo_show
+};
+
+static int sge_qinfo_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &sge_qinfo_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations sge_qinfo_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sge_qinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
+ */
+#define QPL 4
+
+static int sge_qstats_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
+ int qs, r = (uintptr_t)v - 1;
+
+ if (r)
+ seq_putc(seq, '\n');
+
+ #define S3(fmt, s, v) \
+ do { \
+ seq_printf(seq, "%-16s", s); \
+ for (qs = 0; qs < n; ++qs) \
+ seq_printf(seq, " %8" fmt, v); \
+ seq_putc(seq, '\n'); \
+ } while (0)
+ #define S(s, v) S3("s", s, v)
+
+ #define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
+ #define T(s, v) T3("lu", s, v)
+
+ #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
+ #define R(s, v) R3("lu", s, v)
+
+ if (r < eth_entries) {
+ const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+ const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+ int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+ S("QType:", "Ethernet");
+ S("Interface:",
+ (rxq[qs].rspq.netdev
+ ? rxq[qs].rspq.netdev->name
+ : "N/A"));
+ R3("u", "RspQNullInts", rspq.unhandled_irqs);
+ R("RxPackets:", stats.pkts);
+ R("RxCSO:", stats.rx_cso);
+ R("VLANxtract:", stats.vlan_ex);
+ R("LROmerged:", stats.lro_merged);
+ R("LROpackets:", stats.lro_pkts);
+ R("RxDrops:", stats.rx_drops);
+ T("TSO:", tso);
+ T("TxCSO:", tx_cso);
+ T("VLANins:", vlan_ins);
+ T("TxQFull:", q.stops);
+ T("TxQRestarts:", q.restarts);
+ T("TxMapErr:", mapping_err);
+ R("FLAllocErr:", fl.alloc_failed);
+ R("FLLrgAlcErr:", fl.large_alloc_failed);
+ R("FLStarving:", fl.starving);
+ return 0;
+ }
+
+ r -= eth_entries;
+ if (r == 0) {
+ const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+ seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
+ /* no real response queue statistics available to display */
+ seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
+ seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
+ } else if (r == 1) {
+ const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+ seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
+ /* no real response queue statistics available to display */
+ seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
+ seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
+ }
+
+ #undef R
+ #undef T
+ #undef S
+ #undef R3
+ #undef T3
+ #undef S3
+
+ return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file". We group the multi-Queue
+ * sections with QPL Queue Sets per "entry". The sections of the output are:
+ *
+ * Ethernet RX/TX Queue Sets
+ * Firmware Event Queue
+ * Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_qstats_entries(const struct adapter *adapter)
+{
+ return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+ ((adapter->flags & USING_MSI) != 0);
+}
+
+static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
+{
+ int entries = sge_qstats_entries(seq->private);
+
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_qstats_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ int entries = sge_qstats_entries(seq->private);
+
+ (*pos)++;
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qstats_seq_ops = {
+ .start = sge_qstats_start,
+ .next = sge_qstats_next,
+ .stop = sge_qstats_stop,
+ .show = sge_qstats_show
+};
+
+static int sge_qstats_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &sge_qstats_seq_ops);
+
+ if (res == 0) {
+ struct seq_file *seq = file->private_data;
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations sge_qstats_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = sge_qstats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Show PCI-E SR-IOV Virtual Function Resource Limits.
+ */
+static int resources_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct vf_resources *vfres = &adapter->params.vfres;
+
+ #define S(desc, fmt, var) \
+ seq_printf(seq, "%-60s " fmt "\n", \
+ desc " (" #var "):", vfres->var)
+
+ S("Virtual Interfaces", "%d", nvi);
+ S("Egress Queues", "%d", neq);
+ S("Ethernet Control", "%d", nethctrl);
+ S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+ S("Ingress Queues", "%d", niq);
+ S("Traffic Class", "%d", tc);
+ S("Port Access Rights Mask", "%#x", pmask);
+ S("MAC Address Filters", "%d", nexactf);
+ S("Firmware Command Read Capabilities", "%#x", r_caps);
+ S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+ #undef S
+
+ return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = resources_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Show Virtual Interfaces.
+ */
+static int interfaces_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Interface Port VIID\n");
+ } else {
+ struct adapter *adapter = seq->private;
+ int pidx = (uintptr_t)v - 2;
+ struct net_device *dev = adapter->port[pidx];
+ struct port_info *pi = netdev_priv(dev);
+
+ seq_printf(seq, "%9s %4d %#5x\n",
+ dev->name, pi->port_id, pi->viid);
+ }
+ return 0;
+}
+
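+/*
+ * Iterator position encoding for the "interfaces" file: position 0 maps
+ * to SEQ_START_TOKEN (the header line) and positions 1..nports map to
+ * pointer values 2..nports+1, which interfaces_show() converts back to
+ * a port index with "(uintptr_t)v - 2".
+ */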
+static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
+{
+ return pos <= adapter->params.nports
+ ? (void *)(uintptr_t)(pos + 1)
+ : NULL;
+}
+
+static void *interfaces_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos
+ ? interfaces_get_idx(seq->private, *pos)
+ : SEQ_START_TOKEN;
+}
+
+static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return interfaces_get_idx(seq->private, *pos);
+}
+
+static void interfaces_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations interfaces_seq_ops = {
+ .start = interfaces_start,
+ .next = interfaces_next,
+ .stop = interfaces_stop,
+ .show = interfaces_show
+};
+
+static int interfaces_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &interfaces_seq_ops);
+
+ if (res == 0) {
+ struct seq_file *seq = file->private_data;
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations interfaces_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = interfaces_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * /sys/kernel/debug/cxgb4vf/ files list.
+ */
+struct cxgb4vf_debugfs_entry {
+ const char *name; /* name of debugfs node */
+ mode_t mode; /* file system mode */
+ const struct file_operations *fops;
+};
+
+static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+ { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
+ { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
+ { "resources", S_IRUGO, &resources_proc_fops },
+ { "interfaces", S_IRUGO, &interfaces_proc_fops },
+};
+
+/*
+ * Module and device initialization and cleanup code.
+ * ==================================================
+ */
+
+/*
+ * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
+ * directory (debugfs_root) has already been set up.
+ */
+static int __devinit setup_debugfs(struct adapter *adapter)
+{
+ int i;
+
+ BUG_ON(adapter->debugfs_root == NULL);
+
+ /*
+ * Debugfs support is best effort.
+ */
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ (void)debugfs_create_file(debugfs_files[i].name,
+ debugfs_files[i].mode,
+ adapter->debugfs_root,
+ (void *)adapter,
+ debugfs_files[i].fops);
+
+ return 0;
+}
+
+/*
+ * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
+ * it to our caller to tear down the directory (debugfs_root).
+ */
+static void __devexit cleanup_debugfs(struct adapter *adapter)
+{
+ BUG_ON(adapter->debugfs_root == NULL);
+
+ /*
+ * Unlike our sister routine cleanup_proc(), we don't need to remove
+ * individual entries because a call will be made to
+ * debugfs_remove_recursive(). We just need to clean up any ancillary
+ * persistent state.
+ */
+ /* nothing to do */
+}
+
+/*
+ * Perform early "adapter" initialization. This is where we discover what
+ * adapter parameters we're going to be using and initialize basic adapter
+ * hardware support.
+ */
+static int adap_init0(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct sge_params *sge_params = &adapter->params.sge;
+ struct sge *s = &adapter->sge;
+ unsigned int ethqsets;
+ int err;
+
+ /*
+ * Wait for the device to become ready before proceeding ...
+ */
+ err = t4vf_wait_dev_ready(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "device didn't become ready:"
+ " err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Grab basic operational parameters. These will predominantly have
+ * been set up by the Physical Function Driver or will be hard coded
+ * into the adapter. We just have to live with them ... Note that
+ * we _must_ get our VPD parameters before our SGE parameters because
+ * we need to know the adapter's core clock from the VPD in order to
+ * properly decode the SGE Timer Values.
+ */
+ err = t4vf_get_dev_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " device parameters: err=%d\n", err);
+ return err;
+ }
+ err = t4vf_get_vpd_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " VPD parameters: err=%d\n", err);
+ return err;
+ }
+ err = t4vf_get_sge_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " SGE parameters: err=%d\n", err);
+ return err;
+ }
+ err = t4vf_get_rss_glb_config(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " RSS parameters: err=%d\n", err);
+ return err;
+ }
+ if (adapter->params.rss.mode !=
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+ " mode %d\n", adapter->params.rss.mode);
+ return -EINVAL;
+ }
+ err = t4vf_sge_init(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
+ " err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Retrieve our RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ s->timer_val[0] = core_ticks_to_us(adapter,
+ TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adapter,
+ TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adapter,
+ TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adapter,
+ TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adapter,
+ TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adapter,
+ TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
+
+ s->counter_val[0] =
+ THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[1] =
+ THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[2] =
+ THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[3] =
+ THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+
+ /*
+ * Grab our Virtual Interface resource allocation, extract the
+ * features that we're interested in and do a bit of sanity testing on
+ * what we discover.
+ */
+ err = t4vf_get_vfres(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to get virtual interface"
+ " resources: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * The number of "ports" which we support is equal to the number of
+ * Virtual Interfaces with which we've been provisioned.
+ */
+ adapter->params.nports = vfres->nvi;
+ if (adapter->params.nports > MAX_NPORTS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
+ " virtual interfaces\n", MAX_NPORTS,
+ adapter->params.nports);
+ adapter->params.nports = MAX_NPORTS;
+ }
+
+ /*
+ * We need to reserve a number of the ingress queues with Free List
+ * and Interrupt capabilities for special interrupt purposes (like
+ * asynchronous firmware messages, or forwarded interrupts if we're
+ * using MSI). The rest of the FL/Intr-capable ingress queues will be
+ * matched up one-for-one with Ethernet/Control egress queues in order
+ * to form "Queue Sets" which will be aportioned between the "ports".
+ * For each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ */
+ ethqsets = vfres->niqflint - INGQ_EXTRAS;
+ if (vfres->nethctrl != ethqsets) {
+ dev_warn(adapter->pdev_dev, "unequal number of [available]"
+ " ingress/egress queues (%d/%d); using minimum for"
+ " number of Queue Sets\n", ethqsets, vfres->nethctrl);
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ }
+ if (vfres->neq < ethqsets*2) {
+ dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
+ " to support Queue Sets (%d); reducing allowed Queue"
+ " Sets\n", vfres->neq, ethqsets);
+ ethqsets = vfres->neq/2;
+ }
+ if (ethqsets > MAX_ETH_QSETS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
+ " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
+ ethqsets = MAX_ETH_QSETS;
+ }
+ if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
+ dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
+ " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
+ }
+ adapter->sge.max_ethqsets = ethqsets;
+
+ /*
+ * Check for various parameter sanity issues. Most checks simply
+ * result in us using fewer resources than our provisioning, but we
+ * do need at least one "port" with which to work ...
+ */
+ if (adapter->sge.max_ethqsets < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d available"
+ " virtual interfaces (too few Queue Sets)\n",
+ adapter->sge.max_ethqsets, adapter->params.nports);
+ adapter->params.nports = adapter->sge.max_ethqsets;
+ }
+ if (adapter->params.nports == 0) {
+ dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+ "usable!\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
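+/*
+ * Initialize the common parameters of a Response Queue: the interrupt
+ * holdoff timer index, the packet count threshold (enabled only when
+ * pkt_cnt_idx refers to a valid SGE counter), the entry size and the
+ * number of entries.
+ */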
+static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
+ u8 pkt_cnt_idx, unsigned int size,
+ unsigned int iqe_size)
+{
+ rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
+ (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+ rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
+ ? pkt_cnt_idx
+ : 0);
+ rspq->iqe_len = iqe_size;
+ rspq->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and
+ * type of ports we found and the number of available CPUs. Most settings can
+ * be modified by the admin via ethtool and cxgbtool prior to the adapter
+ * being brought up for the first time.
+ */
+static void __devinit cfg_queues(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int q10g, n10g, qidx, pidx, qs;
+
+ /*
+ * We should not be called till we know how many Queue Sets we can
+ * support. In particular, this means that we need to know what kind
+ * of interrupts we'll be using ...
+ */
+ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+
+ /*
+ * Count the number of 10GbE Virtual Interfaces that we have.
+ */
+ n10g = 0;
+ for_each_port(adapter, pidx)
+ n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+
+ /*
+ * We default to 1 queue per non-10G port and up to # of cores queues
+ * per 10G port.
+ */
+ if (n10g == 0)
+ q10g = 0;
+ else {
+ int n1g = (adapter->params.nports - n10g);
+ q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
+ if (q10g > num_online_cpus())
+ q10g = num_online_cpus();
+ }
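+ /*
+ * An illustrative example (numbers chosen for illustration, not taken
+ * from the code): with max_ethqsets = 16, one 10GbE and one 1GbE port
+ * on an 8-CPU machine, n1g = 1 and q10g = (16 - 1) / 1 = 15, which is
+ * then capped at num_online_cpus() = 8; the loop below therefore gives
+ * the 10GbE port 8 Queue Sets and the 1GbE port 1.
+ */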
+
+ /*
+ * Allocate the "Queue Sets" to the various Virtual Interfaces.
+ * The layout will be established in setup_sge_queues() when the
+ * adapter is brought up for the first time.
+ */
+ qidx = 0;
+ for_each_port(adapter, pidx) {
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+
+ pi->first_qset = qidx;
+ pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ qidx += pi->nqsets;
+ }
+ s->ethqsets = qidx;
+
+ /*
+ * Set up default Queue Set parameters ... Start off with the
+ * shortest interrupt holdoff timer.
+ */
+ for (qs = 0; qs < s->max_ethqsets; qs++) {
+ struct sge_eth_rxq *rxq = &s->ethrxq[qs];
+ struct sge_eth_txq *txq = &s->ethtxq[qs];
+
+ init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+ rxq->fl.size = 72;
+ txq->q.size = 1024;
+ }
+
+ /*
+ * The firmware event queue is used for link state changes and
+ * notifications of TX DMA completions.
+ */
+ init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
+ L1_CACHE_BYTES);
+
+ /*
+ * The forwarded interrupt queue is used when we're in MSI interrupt
+ * mode. In this mode all interrupts associated with RX queues will
+ * be forwarded to a single queue which we'll associate with our MSI
+ * interrupt vector. The messages dropped in the forwarded interrupt
+ * queue will indicate which ingress queue needs servicing ... This
+ * queue needs to be large enough to accommodate all of the ingress
+ * queues which are forwarding their interrupt (+1 to prevent the PIDX
+ * from equalling the CIDX if every ingress queue has an outstanding
+ * interrupt). The queue doesn't need to be any larger because no
+ * ingress queue will ever have more than one outstanding interrupt at
+ * any time ...
+ */
+ init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
+ L1_CACHE_BYTES);
+}
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * The caller must pass n >= nports so that each port keeps at least
+ * one queue.
+ */
+static void __devinit reduce_ethqs(struct adapter *adapter, int n)
+{
+ int i;
+ struct port_info *pi;
+
+ /*
+ * While we have too many active Ethernet Queue Sets, iterate across the
+ * "ports" and reduce their individual Queue Set allocations.
+ */
+ BUG_ON(n < adapter->params.nports);
+ while (n < adapter->sge.ethqsets)
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->nqsets > 1) {
+ pi->nqsets--;
+ adapter->sge.ethqsets--;
+ if (adapter->sge.ethqsets <= n)
+ break;
+ }
+ }
+
+ /*
+ * Reassign the starting Queue Sets for each of the "ports" ...
+ */
+ n = 0;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ pi->first_qset = n;
+ n += pi->nqsets;
+ }
+}
+
+/*
+ * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
+ * we get a separate MSI-X vector for every "Queue Set" plus any extras we
+ * need. Minimally we need one for every Virtual Interface plus those needed
+ * for our "extras". Note that this process may lower the maximum number of
+ * allowed Queue Sets ...
+ */
+static int __devinit enable_msix(struct adapter *adapter)
+{
+ int i, err, want, need;
+ struct msix_entry entries[MSIX_ENTRIES];
+ struct sge *s = &adapter->sge;
+
+ for (i = 0; i < MSIX_ENTRIES; ++i)
+ entries[i].entry = i;
+
+ /*
+ * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
+ * plus those needed for our "extras" (for example, the firmware
+ * message queue). We _need_ at least one "Queue Set" per Virtual
+ * Interface plus those needed for our "extras". So now we get to see
+ * if the song is right ...
+ */
+ want = s->max_ethqsets + MSIX_EXTRAS;
+ need = adapter->params.nports + MSIX_EXTRAS;
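+ /*
+ * pci_enable_msix() returns 0 on success, a negative errno on failure,
+ * or, when it can't allocate the requested count, the number of vectors
+ * that could have been allocated. Keep retrying with that smaller count
+ * as long as it still covers our minimum "need".
+ */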
+ while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
+ want = err;
+
+ if (err == 0) {
+ int nqsets = want - MSIX_EXTRAS;
+ if (nqsets < s->max_ethqsets) {
+ dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+ " for %d Queue Sets\n", nqsets);
+ s->max_ethqsets = nqsets;
+ if (nqsets < s->ethqsets)
+ reduce_ethqs(adapter, nqsets);
+ }
+ for (i = 0; i < want; ++i)
+ adapter->msix_info[i].vec = entries[i].vector;
+ } else if (err > 0) {
+ pci_disable_msix(adapter->pdev);
+ dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
+ " not using MSI-X\n", err);
+ }
+ return err;
+}
+
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops cxgb4vf_netdev_ops = {
+ .ndo_open = cxgb4vf_open,
+ .ndo_stop = cxgb4vf_stop,
+ .ndo_start_xmit = t4vf_eth_xmit,
+ .ndo_get_stats = cxgb4vf_get_stats,
+ .ndo_set_rx_mode = cxgb4vf_set_rxmode,
+ .ndo_set_mac_address = cxgb4vf_set_mac_addr,
+ .ndo_select_queue = cxgb4vf_select_queue,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = cxgb4vf_do_ioctl,
+ .ndo_change_mtu = cxgb4vf_change_mtu,
+ .ndo_vlan_rx_register = cxgb4vf_vlan_rx_register,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cxgb4vf_poll_controller,
+#endif
+};
+#endif
+
+/*
+ * "Probe" a device: initialize a device and construct all kernel and driver
+ * state needed to manage the device. This routine is called "init_one" in
+ * the PF Driver ...
+ */
+static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int version_printed;
+
+ int pci_using_dac;
+ int err, pidx;
+ unsigned int pmask;
+ struct adapter *adapter;
+ struct port_info *pi;
+ struct net_device *netdev;
+
+ /*
+ * Vet our module parameters.
+ */
+ if (msi != MSI_MSIX && msi != MSI_MSI) {
+ dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
+ " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
+ MSI_MSI);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ /*
+ * Print our driver banner the first time we're called to initialize a
+ * device.
+ */
+ if (version_printed == 0) {
+ printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+ version_printed = 1;
+ }
+
+ /*
+ * Reserve PCI resources for the device. If we can't get them some
+ * other driver may have already claimed the device ...
+ */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ return err;
+ }
+
+ /*
+ * Initialize generic PCI device state.
+ */
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto err_release_regions;
+ }
+
+ /*
+ * Set up our DMA mask: try for 64-bit address masking first and
+ * fall back to 32-bit if we can't get 64 bits ...
+ */
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err == 0) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
+ " coherent allocations\n");
+ goto err_disable_device;
+ }
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err != 0) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_disable_device;
+ }
+ pci_using_dac = 0;
+ }
+
+ /*
+ * Enable bus mastering for the device ...
+ */
+ pci_set_master(pdev);
+
+ /*
+ * Allocate our adapter data structure and attach it to the device.
+ */
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+ pci_set_drvdata(pdev, adapter);
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+
+ /*
+ * Initialize SMP data synchronization resources.
+ */
+ spin_lock_init(&adapter->stats_lock);
+
+ /*
+ * Map our I/O registers in BAR0.
+ */
+ adapter->regs = pci_ioremap_bar(pdev, 0);
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto err_free_adapter;
+ }
+
+ /*
+ * Initialize adapter level features.
+ */
+ adapter->name = pci_name(pdev);
+ adapter->msg_enable = dflt_msg_enable;
+ err = adap_init0(adapter);
+ if (err)
+ goto err_unmap_bar;
+
+ /*
+ * Allocate our "adapter ports" and stitch everything together.
+ */
+ pmask = adapter->params.vfres.pmask;
+ for_each_port(adapter, pidx) {
+ int port_id, viid;
+
+ /*
+ * We simplistically allocate our virtual interfaces
+ * sequentially across the port numbers to which we have
+ * access rights. This should be configurable in some manner
+ * ...
+ */
+ if (pmask == 0)
+ break;
+ port_id = ffs(pmask) - 1;
+ pmask &= ~(1 << port_id);
+ viid = t4vf_alloc_vi(adapter, port_id);
+ if (viid < 0) {
+ dev_err(&pdev->dev, "cannot allocate VI for port %d:"
+ " err=%d\n", port_id, viid);
+ err = viid;
+ goto err_free_dev;
+ }
+
+ /*
+ * Allocate our network device and stitch things together.
+ */
+ netdev = alloc_etherdev_mq(sizeof(struct port_info),
+ MAX_PORT_QSETS);
+ if (netdev == NULL) {
+ dev_err(&pdev->dev, "cannot allocate netdev for"
+ " port %d\n", port_id);
+ t4vf_free_vi(adapter, viid);
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+ adapter->port[pidx] = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pi = netdev_priv(netdev);
+ pi->adapter = adapter;
+ pi->pidx = pidx;
+ pi->port_id = port_id;
+ pi->viid = viid;
+
+ /*
+ * Initialize the starting state of our "port" and register
+ * it.
+ */
+ pi->xact_addr_filt = -1;
+ pi->rx_offload = RX_CSO;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ netdev->irq = pdev->irq;
+
+ netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_GRO);
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features =
+ (netdev->features &
+ ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));
+
+#ifdef HAVE_NET_DEVICE_OPS
+ netdev->netdev_ops = &cxgb4vf_netdev_ops;
+#else
+ netdev->vlan_rx_register = cxgb4vf_vlan_rx_register;
+ netdev->open = cxgb4vf_open;
+ netdev->stop = cxgb4vf_stop;
+ netdev->hard_start_xmit = t4vf_eth_xmit;
+ netdev->get_stats = cxgb4vf_get_stats;
+ netdev->set_rx_mode = cxgb4vf_set_rxmode;
+ netdev->do_ioctl = cxgb4vf_do_ioctl;
+ netdev->change_mtu = cxgb4vf_change_mtu;
+ netdev->set_mac_address = cxgb4vf_set_mac_addr;
+ netdev->select_queue = cxgb4vf_select_queue;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = cxgb4vf_poll_controller;
+#endif
+#endif
+ SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+
+ /*
+ * Initialize the hardware/software state for the port.
+ */
+ err = t4vf_port_init(adapter, pidx);
+ if (err) {
+ dev_err(&pdev->dev, "cannot initialize port %d\n",
+ pidx);
+ goto err_free_dev;
+ }
+ }
+
+ /*
+ * The "card" is now ready to go. If any errors occur during device
+ * registration we do not fail the whole "card" but rather proceed
+ * only with the ports we manage to register successfully. However we
+ * must register at least one net device.
+ */
+ for_each_port(adapter, pidx) {
+ netdev = adapter->port[pidx];
+ if (netdev == NULL)
+ continue;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_warn(&pdev->dev, "cannot register net device %s,"
+ " skipping\n", netdev->name);
+ continue;
+ }
+
+ set_bit(pidx, &adapter->registered_device_map);
+ }
+ if (adapter->registered_device_map == 0) {
+ dev_err(&pdev->dev, "could not register any net devices\n");
+ goto err_free_dev;
+ }
+
+ /*
+ * Set up our debugfs entries.
+ */
+ if (cxgb4vf_debugfs_root) {
+ adapter->debugfs_root =
+ debugfs_create_dir(pci_name(pdev),
+ cxgb4vf_debugfs_root);
+ if (adapter->debugfs_root == NULL)
+ dev_warn(&pdev->dev, "could not create debugfs"
+ " directory");
+ else
+ setup_debugfs(adapter);
+ }
+
+ /*
+ * See what interrupts we'll be using. If we've been configured to
+ * use MSI-X interrupts, try to enable them but fall back to using
+ * MSI interrupts if we can't enable MSI-X interrupts. If we can't
+ * get MSI interrupts we bail with the error.
+ */
+ if (msi == MSI_MSIX && enable_msix(adapter) == 0)
+ adapter->flags |= USING_MSIX;
+ else {
+ err = pci_enable_msi(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
+ " err=%d\n",
+ msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
+ goto err_free_debugfs;
+ }
+ adapter->flags |= USING_MSI;
+ }
+
+ /*
+ * Now that we know how many "ports" we have and what their types are,
+ * and how many Queue Sets we can support, we can configure our queue
+ * resources.
+ */
+ cfg_queues(adapter);
+
+ /*
+ * Print a short notice on the existence and configuration of the new
+ * VF network device ...
+ */
+ for_each_port(adapter, pidx) {
+ dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
+ adapter->port[pidx]->name,
+ (adapter->flags & USING_MSIX) ? "MSI-X" :
+ (adapter->flags & USING_MSI) ? "MSI" : "");
+ }
+
+ /*
+ * Return success!
+ */
+ return 0;
+
+ /*
+ * Error recovery and exit code. Unwind state that's been created
+ * so far and return the error.
+ */
+
+err_free_debugfs:
+ if (adapter->debugfs_root) {
+ cleanup_debugfs(adapter);
+ debugfs_remove_recursive(adapter->debugfs_root);
+ }
+
+err_free_dev:
+ for_each_port(adapter, pidx) {
+ netdev = adapter->port[pidx];
+ if (netdev == NULL)
+ continue;
+ pi = netdev_priv(netdev);
+ t4vf_free_vi(adapter, pi->viid);
+ if (test_bit(pidx, &adapter->registered_device_map))
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ }
+
+err_unmap_bar:
+ iounmap(adapter->regs);
+
+err_free_adapter:
+ kfree(adapter);
+ pci_set_drvdata(pdev, NULL);
+
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_clear_master(pdev);
+
+err_release_regions:
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+err_out:
+ return err;
+}
+
+/*
+ * "Remove" a device: tear down all kernel and driver state created in the
+ * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
+ * that this is called "remove_one" in the PF Driver.)
+ */
+static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ /*
+ * Tear down driver state associated with device.
+ */
+ if (adapter) {
+ int pidx;
+
+ /*
+ * Stop all of our activity. Unregister network port,
+ * disable interrupts, etc.
+ */
+ for_each_port(adapter, pidx)
+ if (test_bit(pidx, &adapter->registered_device_map))
+ unregister_netdev(adapter->port[pidx]);
+ t4vf_sge_stop(adapter);
+ if (adapter->flags & USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+ adapter->flags &= ~USING_MSIX;
+ } else if (adapter->flags & USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~USING_MSI;
+ }
+
+ /*
+ * Tear down our debugfs entries.
+ */
+ if (adapter->debugfs_root) {
+ cleanup_debugfs(adapter);
+ debugfs_remove_recursive(adapter->debugfs_root);
+ }
+
+ /*
+ * Free all of the various resources which we've acquired ...
+ */
+ t4vf_free_sge_resources(adapter);
+ for_each_port(adapter, pidx) {
+ struct net_device *netdev = adapter->port[pidx];
+ struct port_info *pi;
+
+ if (netdev == NULL)
+ continue;
+
+ pi = netdev_priv(netdev);
+ t4vf_free_vi(adapter, pi->viid);
+ free_netdev(netdev);
+ }
+ iounmap(adapter->regs);
+ kfree(adapter);
+ pci_set_drvdata(pdev, NULL);
+ }
+
+ /*
+ * Disable the device and release its PCI resources.
+ */
+ pci_disable_device(pdev);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+}
+
+/*
+ * PCI Device registration data structures.
+ */
+#define CH_DEVICE(devid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
+
+static struct pci_device_id cxgb4vf_pci_tbl[] = {
+ CH_DEVICE(0xb000, 0), /* PE10K FPGA */
+ CH_DEVICE(0x4800, 0), /* T440-dbg */
+ CH_DEVICE(0x4801, 0), /* T420-cr */
+ CH_DEVICE(0x4802, 0), /* T422-cr */
+ { 0, }
+};
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
+
+static struct pci_driver cxgb4vf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cxgb4vf_pci_tbl,
+ .probe = cxgb4vf_pci_probe,
+ .remove = __devexit_p(cxgb4vf_pci_remove),
+};
+
+/*
+ * Initialize global driver state.
+ */
+static int __init cxgb4vf_module_init(void)
+{
+ int ret;
+
+ /* Debugfs support is optional, just warn if this fails */
+ cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!cxgb4vf_debugfs_root)
+ printk(KERN_WARNING KBUILD_MODNAME ": could not create"
+ " debugfs entry, continuing\n");
+
+ ret = pci_register_driver(&cxgb4vf_driver);
+ if (ret < 0)
+ debugfs_remove(cxgb4vf_debugfs_root);
+ return ret;
+}
+
+/*
+ * Tear down global driver state.
+ */
+static void __exit cxgb4vf_module_exit(void)
+{
+ pci_unregister_driver(&cxgb4vf_driver);
+ debugfs_remove(cxgb4vf_debugfs_root);
+}
+
+module_init(cxgb4vf_module_init);
+module_exit(cxgb4vf_module_exit);
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
new file mode 100644
index 0000000..3a7c02f
--- /dev/null
+++ b/drivers/net/cxgb4vf/sge.c
@@ -0,0 +1,2449 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/dma-mapping.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4fw_api.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Decoded Adapter Parameters.
+ */
+static u32 FL_PG_ORDER; /* large page allocation size */
+static u32 STAT_LEN; /* length of status page at ring end */
+static u32 PKTSHIFT; /* padding between CPL and packet data */
+static u32 FL_ALIGN; /* response queue message alignment */
+
+/*
+ * Constants ...
+ */
+enum {
+ /*
+ * Egress Queue sizes, producer and consumer indices are all in units
+ * of Egress Context Units (EQ_UNIT bytes). Note that as far as the hardware is
+ * concerned, the free list is an Egress Queue (the host produces free
+ * buffers which the hardware consumes) and free list entries are
+ * 64-bit PCI DMA addresses.
+ */
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+ /*
+ * Max number of TX descriptors we clean up at a time. Should be
+ * modest as freeing skbs isn't cheap and it happens while holding
+ * locks. We just need to free packets faster than they arrive, we
+ * eventually catch up and keep the amortized cost reasonable.
+ */
+ MAX_TX_RECLAIM = 16,
+
+ /*
+ * Max number of Rx buffers we replenish at a time. Again keep this
+ * modest, allocating buffers isn't cheap either.
+ */
+ MAX_RX_REFILL = 16,
+
+ /*
+ * Period of the Rx queue check timer. This timer is infrequent as it
+ * has something to do only when the system experiences severe memory
+ * shortage.
+ */
+ RX_QCHECK_PERIOD = (HZ / 2),
+
+ /*
+ * Period of the TX queue check timer and the maximum number of TX
+ * descriptors to be reclaimed by the TX timer.
+ */
+ TX_QCHECK_PERIOD = (HZ / 2),
+ MAX_TIMER_TX_RECLAIM = 100,
+
+ /*
+ * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
+ * timer will attempt to refill it.
+ */
+ FL_STARVE_THRES = 4,
+
+ /*
+ * Suspend an Ethernet TX queue with fewer available descriptors than
+ * this. We always want to have room for a maximum sized packet:
+ * inline immediate data + MAX_SKB_FRAGS. This is the same as
+ * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
+ * (see that function and its helpers for a description of the
+ * calculation).
+ */
+ ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
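+ /*
+ * The expression below is the SGL size in flits written out long-hand:
+ * the first address/length pair lives in the ulptx_sgl header itself
+ * (2 flits), each further pair of fragments occupies a 3-flit
+ * ulptx_sge_pair, and a final unpaired fragment costs 2 more flits.
+ */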
+ ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
+ ((ETHTXQ_MAX_FRAGS-1) & 1) +
+ 2),
+ ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+ ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
+
+ ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
+
+ /*
+ * Max TX descriptor space we allow for an Ethernet packet to be
+ * inlined into a WR. This is limited by the maximum value which
+ * we can specify for immediate data in the firmware Ethernet TX
+ * Work Request.
+ */
+ MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
+
+ /*
+ * Max size of a WR sent through a control TX queue.
+ */
+ MAX_CTRL_WR_LEN = 256,
+
+ /*
+ * Maximum amount of data which we'll ever need to inline into a
+ * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
+ */
+ MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
+ ? MAX_IMM_TX_PKT_LEN
+ : MAX_CTRL_WR_LEN),
+
+ /*
+ * For incoming packets less than RX_COPY_THRES, we copy the data into
+ * an skb rather than referencing the data. We allocate enough
+ * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
+ * of the data (header).
+ */
+ RX_COPY_THRES = 256,
+ RX_PULL_LEN = 128,
+};
+
+/*
+ * Can't define this in the above enum because PKTSHIFT isn't a constant in
+ * the VF Driver ...
+ */
+#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+
+/*
+ * Software state per TX descriptor.
+ */
+struct tx_sw_desc {
+ struct sk_buff *skb; /* socket buffer of TX data source */
+ struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */
+};
+
+/*
+ * Software state per RX Free List descriptor. We keep track of the allocated
+ * FL page, its size, and its PCI DMA address (if the page is mapped). The FL
+ * page size and its PCI DMA mapped state are stored in the low bits of the
+ * PCI DMA address as per below.
+ */
+struct rx_sw_desc {
+ struct page *page; /* Free List page buffer */
+ dma_addr_t dma_addr; /* PCI DMA address (if mapped) */
+ /* and flags (see below) */
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the
+ * SGE also uses the low 4 bits to determine the size of the buffer. It uses
+ * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
+ * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
+ * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
+ * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is
+ * maintained in an inverse sense so the hardware never sees that bit high.
+ */
+enum {
+ RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */
+ RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+};
+
+/**
+ * get_buf_addr - return DMA buffer address of software descriptor
+ * @sdesc: pointer to the software buffer descriptor
+ *
+ * Return the DMA buffer address of a software descriptor (stripping out
+ * our low-order flag bits).
+ */
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
+{
+ return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+/**
+ * is_buf_mapped - is buffer mapped for DMA?
+ * @sdesc: pointer to the software buffer descriptor
+ *
+ * Determine whether the buffer associated with a software descriptor is
+ * mapped for DMA or not.
+ */
+static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
+{
+ return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
+}
+
+/**
+ * need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ * Returns true if the platform needs sk_buff unmapping. Since the
+ * result is a compile-time constant, the compiler optimizes away any
+ * unneeded unmapping code.
+ */
+static inline int need_skb_unmap(void)
+{
+ /*
+ * This structure is used to tell if the platform needs buffer
+ * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
+ */
+ struct dummy {
+ DECLARE_PCI_UNMAP_ADDR(addr);
+ };
+
+ return sizeof(struct dummy) != 0;
+}
+
+/**
+ * txq_avail - return the number of available slots in a TX queue
+ * @tq: the TX queue
+ *
+ * Returns the number of available descriptors in a TX queue.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *tq)
+{
+ return tq->size - 1 - tq->in_use;
+}
+
+/**
+ * fl_cap - return the capacity of a Free List
+ * @fl: the Free List
+ *
+ * Returns the capacity of a Free List. The capacity is less than the
+ * size because an Egress Queue Index Unit worth of descriptors needs to
+ * be left unpopulated, otherwise the Producer and Consumer indices PIDX
+ * and CIDX will match and the hardware will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - FL_PER_EQ_UNIT;
+}
+
+/**
+ * fl_starving - return whether a Free List is starving.
+ * @fl: the Free List
+ *
+ * Tests specified Free List to see whether the number of buffers
+ * available to the hardware has fallen below our "starvation"
+ * threshold.
+ */
+static inline bool fl_starving(const struct sge_fl *fl)
+{
+ return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+}
+
+/**
+ * map_skb - map an skb for DMA to the device
+ * @dev: the egress net device
+ * @skb: the packet to map
+ * @addr: a pointer to the base of the DMA mapping array
+ *
+ * Map an skb for DMA to the device and return an array of DMA addresses.
+ */
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto out_err;
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+ for (fp = si->frags; fp < end; fp++) {
+ *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+ dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+ const struct ulptx_sgl *sgl, const struct sge_txq *tq)
+{
+ const struct ulptx_sge_pair *p;
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (likely(skb_headlen(skb)))
+ dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
+ be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+ else {
+ dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
+ be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+ nfrags--;
+ }
+
+ /*
+ * the complexity below is because of the possibility of a wrap-around
+ * in the middle of an SGL
+ */
+ for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+ if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
+unmap:
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p++;
+ } else if ((u8 *)p == (u8 *)tq->stat) {
+ p = (const struct ulptx_sge_pair *)tq->desc;
+ goto unmap;
+ } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
+ const __be64 *addr = (const __be64 *)tq->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[1]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[2];
+ } else {
+ const __be64 *addr = (const __be64 *)tq->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[1];
+ }
+ }
+ if (nfrags) {
+ __be64 addr;
+
+ if ((u8 *)p == (u8 *)tq->stat)
+ p = (const struct ulptx_sge_pair *)tq->desc;
+ addr = ((u8 *)p + 16 <= (u8 *)tq->stat
+ ? p->addr[0]
+ : *(const __be64 *)tq->desc);
+ dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
+ DMA_TO_DEVICE);
+ }
+}
+
+/**
+ * free_tx_desc - reclaims TX descriptors and their buffers
+ * @adapter: the adapter
+ * @tq: the TX queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims TX descriptors from an SGE TX queue and frees the associated
+ * TX buffers. Called with the TX queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
+ unsigned int n, bool unmap)
+{
+ struct tx_sw_desc *sdesc;
+ unsigned int cidx = tq->cidx;
+ struct device *dev = adapter->pdev_dev;
+
+ const int need_unmap = need_skb_unmap() && unmap;
+
+ sdesc = &tq->sdesc[cidx];
+ while (n--) {
+ /*
+ * If we kept a reference to the original TX skb, we need to
+ * unmap it from PCI DMA space (if required) and free it.
+ */
+ if (sdesc->skb) {
+ if (need_unmap)
+ unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
+ kfree_skb(sdesc->skb);
+ sdesc->skb = NULL;
+ }
+
+ sdesc++;
+ if (++cidx == tq->size) {
+ cidx = 0;
+ sdesc = tq->sdesc;
+ }
+ }
+ tq->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a TX queue.
+ */
+static inline int reclaimable(const struct sge_txq *tq)
+{
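+ /*
+ * The hardware writes its own Consumer Index into the status page at
+ * the end of the ring (tq->stat); the distance from our software CIdx,
+ * modulo the ring size, is how many descriptors have completed.
+ */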
+ int hw_cidx = be16_to_cpu(tq->stat->cidx);
+ int reclaimable = hw_cidx - tq->cidx;
+ if (reclaimable < 0)
+ reclaimable += tq->size;
+ return reclaimable;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed TX descriptors
+ * @adapter: the adapter
+ * @tq: the TX queue to reclaim completed descriptors from
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims TX descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the TX
+ * queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adapter,
+ struct sge_txq *tq,
+ bool unmap)
+{
+ int avail = reclaimable(tq);
+
+ if (avail) {
+ /*
+ * Limit the amount of clean up work we do at a time to keep
+ * the TX lock hold time O(1).
+ */
+ if (avail > MAX_TX_RECLAIM)
+ avail = MAX_TX_RECLAIM;
+
+ free_tx_desc(adapter, tq, avail, unmap);
+ tq->in_use -= avail;
+ }
+}
+
+/**
+ * get_buf_size - return the size of an RX Free List buffer.
+ * @sdesc: pointer to the software buffer descriptor
+ */
+static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+{
+ return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+ ? (PAGE_SIZE << FL_PG_ORDER)
+ : PAGE_SIZE;
+}
+
+/**
+ * free_rx_bufs - free RX buffers on an SGE Free List
+ * @adapter: the adapter
+ * @fl: the SGE Free List to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE Free List RX queue. The
+ * buffers must be made inaccessible to hardware before calling this
+ * function.
+ */
+static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
+{
+ while (n--) {
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+ if (is_buf_mapped(sdesc))
+ dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+ get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ put_page(sdesc->page);
+ sdesc->page = NULL;
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ fl->avail--;
+ }
+}
+
+/**
+ * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
+ * @adapter: the adapter
+ * @fl: the SGE Free List
+ *
+ * Unmap the current buffer on an SGE Free List RX queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Do note that the FL still loses any further access to the buffer.
+ * This is used predominantly to "transfer ownership" of an FL buffer
+ * to another entity (typically an skb's fragment list).
+ */
+static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
+{
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+ if (is_buf_mapped(sdesc))
+ dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+ get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ sdesc->page = NULL;
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ fl->avail--;
+}
+
+/**
+ * ring_fl_db - ring doorbell on free list
+ * @adapter: the adapter
+ * @fl: the Free List whose doorbell should be rung ...
+ *
+ * Tell the Scatter Gather Engine that there are new free list entries
+ * available.
+ */
+static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
+{
+ /*
+ * The SGE keeps track of its Producer and Consumer Indices in terms
+ * of Egress Queue Units, so we can only tell it about whole multiples
+ * of the number of Free List Entries per Egress Queue Unit ...
+ */
+ if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+ wmb();
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ DBPRIO |
+ QID(fl->cntxt_id) |
+ PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
+ fl->pend_cred %= FL_PER_EQ_UNIT;
+ }
+}
+
+/**
+ * set_rx_sw_desc - initialize software RX buffer descriptor
+ * @sdesc: pointer to the software RX buffer descriptor
+ * @page: pointer to the page data structure backing the RX buffer
+ * @dma_addr: PCI DMA address (possibly with low-bit flags)
+ */
+static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
+ dma_addr_t dma_addr)
+{
+ sdesc->page = page;
+ sdesc->dma_addr = dma_addr;
+}
+
+/*
+ * Support for poisoning RX buffers ...
+ */
+#define POISON_BUF_VAL -1
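+/* -1 disables poisoning; set to a byte value (0-255) to fill new buffers */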
+
+static inline void poison_buf(struct page *page, size_t sz)
+{
+#if POISON_BUF_VAL >= 0
+ memset(page_address(page), POISON_BUF_VAL, sz);
+#endif
+}
+
+/**
+ * refill_fl - refill an SGE RX buffer ring
+ * @adapter: the adapter
+ * @fl: the Free List ring to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for the allocations
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ * allocated with the supplied gfp flags.  The caller must ensure that
+ * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
+ * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number
+ * of buffers allocated. If afterwards the queue is found critically low,
+ * mark it as starving in the bitmap of starving FLs.
+ */
+static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
+ int n, gfp_t gfp)
+{
+ struct page *page;
+ dma_addr_t dma_addr;
+ unsigned int cred = fl->avail;
+ __be64 *d = &fl->desc[fl->pidx];
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
+
+ /*
+ * Sanity: ensure that the result of adding n Free List buffers
+ * won't result in wrapping the SGE's Producer Index around to
+	 * its Consumer Index, thereby indicating an empty Free List ...
+ */
+ BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
+
+ /*
+ * If we support large pages, prefer large buffers and fail over to
+ * small pages if we can't allocate large pages to satisfy the refill.
+ * If we don't support large pages, drop directly into the small page
+ * allocation code.
+ */
+ if (FL_PG_ORDER == 0)
+ goto alloc_small_pages;
+
+ while (n) {
+ page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ FL_PG_ORDER);
+ if (unlikely(!page)) {
+ /*
+			 * We've failed in our attempt to allocate a "large
+ * page". Fail over to the "small page" allocation
+ * below.
+ */
+ fl->large_alloc_failed++;
+ break;
+ }
+ poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+
+ dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
+ PAGE_SIZE << FL_PG_ORDER,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+ /*
+ * We've run out of DMA mapping space. Free up the
+ * buffer and return with what we've managed to put
+ * into the free list. We don't want to fail over to
+ * the small page allocation below in this case
+ * because DMA mapping resources are typically
+			 * critical resources once they become scarce.
+ */
+ __free_pages(page, FL_PG_ORDER);
+ goto out;
+ }
+ dma_addr |= RX_LARGE_BUF;
+ *d++ = cpu_to_be64(dma_addr);
+
+ set_rx_sw_desc(sdesc, page, dma_addr);
+ sdesc++;
+
+ fl->avail++;
+ if (++fl->pidx == fl->size) {
+ fl->pidx = 0;
+ sdesc = fl->sdesc;
+ d = fl->desc;
+ }
+ n--;
+ }
+
+alloc_small_pages:
+ while (n--) {
+ page = __netdev_alloc_page(adapter->port[0],
+ gfp | __GFP_NOWARN);
+ if (unlikely(!page)) {
+ fl->alloc_failed++;
+ break;
+ }
+ poison_buf(page, PAGE_SIZE);
+
+ dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+ netdev_free_page(adapter->port[0], page);
+ break;
+ }
+ *d++ = cpu_to_be64(dma_addr);
+
+ set_rx_sw_desc(sdesc, page, dma_addr);
+ sdesc++;
+
+ fl->avail++;
+ if (++fl->pidx == fl->size) {
+ fl->pidx = 0;
+ sdesc = fl->sdesc;
+ d = fl->desc;
+ }
+ }
+
+out:
+ /*
+ * Update our accounting state to incorporate the new Free List
+ * buffers, tell the hardware about them and return the number of
+	 * buffers which we were able to allocate.
+ */
+ cred = fl->avail - cred;
+ fl->pend_cred += cred;
+ ring_fl_db(adapter, fl);
+
+ if (unlikely(fl_starving(fl))) {
+ smp_wmb();
+ set_bit(fl->cntxt_id, adapter->sge.starving_fl);
+ }
+
+ return cred;
+}
+
+/*
+ * Refill a Free List to its capacity or the Maximum Refill Increment,
+ * whichever is smaller ...
+ */
+static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
+{
+ refill_fl(adapter, fl,
+ min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+ GFP_ATOMIC);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @dev: the PCI device's core device
+ * @nelem: the number of descriptors
+ * @hwsize: the size of each hardware descriptor
+ * @swsize: the size of each software descriptor
+ * @busaddrp: the physical PCI bus address of the allocated ring
+ * @swringp: return address pointer for software ring
+ * @stat_size: extra space in hardware ring for status information
+ *
+ * Allocates resources for an SGE descriptor ring, such as TX queues,
+ * free buffer lists, response queues, etc. Each SGE ring requires
+ * space for its hardware descriptors plus, optionally, space for software
+ * state associated with each hardware entry (the metadata). The function
+ * returns three values: the virtual address for the hardware ring (the
+ * return value of the function), the PCI bus address of the hardware
+ * ring (in *busaddrp), and the address of the software ring (in *swringp).
+ * Both the hardware and software rings are returned zeroed out.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
+ size_t swsize, dma_addr_t *busaddrp, void *swringp,
+ size_t stat_size)
+{
+ /*
+	 * Allocate the hardware ring and obtain its PCI DMA bus address.
+ */
+ size_t hwlen = nelem * hwsize + stat_size;
+ void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+
+ if (!hwring)
+ return NULL;
+
+ /*
+ * If the caller wants a software ring, allocate it and return a
+ * pointer to it in *swringp.
+ */
+ BUG_ON((swsize != 0) != (swringp != NULL));
+ if (swsize) {
+ void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
+
+ if (!swring) {
+ dma_free_coherent(dev, hwlen, hwring, *busaddrp);
+ return NULL;
+ }
+ *(void **)swringp = swring;
+ }
+
+ /*
+ * Zero out the hardware ring and return its address as our function
+ * value.
+ */
+ memset(hwring, 0, hwlen);
+ return hwring;
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits (8-byte units) needed for a Direct
+ * Scatter/Gather List that can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /*
+ * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+ * addresses. The DSGL Work Request starts off with a 32-bit DSGL
+ * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+ * repeated sequences of { Length[i], Length[i+1], Address[i],
+ * Address[i+1] } (this ensures that all addresses are on 64-bit
+ * boundaries). If N is even, then Length[N+1] should be set to 0 and
+ * Address[N+1] is omitted.
+ *
+ * The following calculation incorporates all of the above. It's
+ * somewhat hard to follow but, briefly: the "+2" accounts for the
+ * first two flits which include the DSGL header, Length0 and
+	 * Address0; the "(3*(n-1))/2" covers the main body of list entries
+	 * (3 flits for every pair of the remaining N); and finally the
+	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
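+	 *
+	 * For example, n = 3 gives (3*2)/2 + (2&1) + 2 = 5 flits and n = 4
+	 * gives (3*3)/2 + (3&1) + 2 = 7 flits.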
+ */
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/**
+ * flits_to_desc - returns the num of TX descriptors for the given flits
+ * @flits: the number of flits
+ *
+ * Returns the number of TX descriptors needed for the supplied number
+ * of flits.
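+ *
+ * A single TX descriptor holds TXD_PER_EQ_UNIT flits, so this is just a
+ * round-up division.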
+ */
+static inline unsigned int flits_to_desc(unsigned int flits)
+{
+ BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
+ return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
+}
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit completely as
+ * immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+ /*
+ * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+ * which does not accommodate immediate data. We could dike out all
+ * of the support code for immediate data but that would tie our hands
+	 * too much if we ever want to enhance the firmware.  It would also
+ * create more differences between the PF and VF Drivers.
+ */
+ return false;
+}
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet TX WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a TX Work Request for the
+ * given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ /*
+ * If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+ if (is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+ sizeof(__be64));
+
+ /*
+	 * Otherwise, we're going to have to construct a Scatter/Gather List
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+	 * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ if (skb_shinfo(skb)->gso_size)
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+/**
+ * write_sgl - populate a Scatter/Gather List for a packet
+ * @skb: the packet
+ * @tq: the TX queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into skb main-body data to include in the SGL
+ * @addr: the list of DMA bus addresses for the SGL elements
+ *
+ * Generates a Scatter/Gather List for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes.  @sgl must be 16-byte
+ * aligned and within a TX descriptor with available space.  @end points
+ * right after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @tq->stat.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ const struct skb_shared_info *si = skb_shinfo(skb);
+ unsigned int nfrags = si->nr_frags;
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+ len = skb_headlen(skb) - start;
+ if (likely(len)) {
+ sgl->len0 = htonl(len);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ nfrags++;
+ } else {
+ sgl->len0 = htonl(si->frags[0].size);
+ sgl->addr0 = cpu_to_be64(addr[1]);
+ }
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
+
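+	/*
+	 * Write the remaining fragments as { len[0], len[1], addr[0],
+	 * addr[1] } pairs; a trailing odd fragment gets a zero second
+	 * length.
+	 */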
+ for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+ to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[1] = cpu_to_be32(si->frags[++i].size);
+ to->addr[0] = cpu_to_be64(addr[i]);
+ to->addr[1] = cpu_to_be64(addr[++i]);
+ }
+ if (nfrags) {
+ to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[1] = cpu_to_be32(0);
+ to->addr[0] = cpu_to_be64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)tq->stat)) {
+ unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)tq->stat;
+ memcpy(tq->desc, (u8 *)buf + part0, part1);
+ end = (void *)tq->desc + part1;
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
+
+/**
+ * ring_tx_db - ring a TX queue's doorbell
+ * @adapter: the adapter
+ * @tq: the TX queue
+ * @n: number of new descriptors to give to HW
+ *
+ * Ring the doorbell for a TX queue.
+ */
+static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
+ int n)
+{
+ /*
+ * Warn if we write doorbells with the wrong priority and write
+ * descriptors before telling HW.
+ */
+ WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
+ wmb();
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ QID(tq->cntxt_id) | PIDX(n));
+}
+
+/**
+ * inline_tx_skb - inline a packet's data into TX descriptors
+ * @skb: the packet
+ * @tq: the TX queue where the packet will be inlined
+ * @pos: starting position in the TX queue to inline the packet
+ *
+ * Inline a packet's contents directly into TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
+ void *pos)
+{
+ u64 *p;
+ int left = (void *)tq->stat - pos;
+
+ if (likely(skb->len <= left)) {
+ if (likely(!skb->data_len))
+ skb_copy_from_linear_data(skb, pos, skb->len);
+ else
+ skb_copy_bits(skb, 0, pos, skb->len);
+ pos += skb->len;
+ } else {
+ skb_copy_bits(skb, 0, pos, left);
+ skb_copy_bits(skb, left, tq->desc, skb->len - left);
+ pos = (void *)tq->desc + (skb->len - left);
+ }
+
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8)
+ *p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(const struct sk_buff *skb)
+{
+ int csum_type;
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->version == 4) {
+ if (iph->protocol == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP;
+ else if (iph->protocol == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP;
+ else {
+nocsum:
+ /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return TXPKT_L4CSUM_DIS;
+ }
+ } else {
+ /*
+ * this doesn't work with extension headers
+ */
+ const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP6;
+ else if (ip6h->nexthdr == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP6;
+ else
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP))
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
+ TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+ else {
+ int start = skb_transport_offset(skb);
+
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_CSUM_START(start) |
+ TXPKT_CSUM_LOC(start + skb->csum_offset);
+ }
+}
+
+/*
+ * Stop an Ethernet TX queue and record that state change.
+ */
+static void txq_stop(struct sge_eth_txq *txq)
+{
+ netif_tx_stop_queue(txq->txq);
+ txq->q.stops++;
+}
+
+/*
+ * Advance our software state for a TX queue by adding n in use descriptors.
+ */
+static inline void txq_advance(struct sge_txq *tq, unsigned int n)
+{
+ tq->in_use += n;
+ tq->pidx += n;
+ if (tq->pidx >= tq->size)
+ tq->pidx -= tq->size;
+}
+
+/**
+ * t4vf_eth_xmit - add a packet to an Ethernet TX queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
+ */
+int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u64 cntrl, *end;
+ int qidx, credits;
+ unsigned int flits, ndesc;
+ struct adapter *adapter;
+ struct sge_eth_txq *txq;
+ const struct port_info *pi;
+ struct fw_eth_tx_pkt_vm_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ const struct skb_shared_info *ssi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
+
+ /*
+ * The chip minimum packet length is 10 octets but the firmware
+ * command that we are using requires that we copy the Ethernet header
+ * (including the VLAN tag) into the header so we reject anything
+ * smaller than that ...
+ */
+ if (unlikely(skb->len < fw_hdr_copy_len))
+ goto out_free;
+
+ /*
+ * Figure out which TX Queue we're going to use.
+ */
+ pi = netdev_priv(dev);
+ adapter = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ BUG_ON(qidx >= pi->nqsets);
+ txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+ /*
+ * Take this opportunity to reclaim any TX Descriptors whose DMA
+ * transfers have completed.
+ */
+ reclaim_completed_tx(adapter, &txq->q, true);
+
+ /*
+ * Calculate the number of flits and TX Descriptors we're going to
+ * need along with how many TX Descriptors will be left over after
+ * we inject our Work Request.
+ */
+ flits = calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ /*
+ * Not enough room for this packet's Work Request. Stop the
+ * TX Queue and return a "busy" condition. The queue will get
+ * started later on when the firmware informs us that space
+ * has opened up.
+ */
+ txq_stop(txq);
+ dev_err(adapter->pdev_dev,
+ "%s: TX ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!is_eth_imm(skb) &&
+ unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ /*
+ * We need to map the skb into PCI DMA space (because it can't
+ * be in-lined directly into the Work Request) and the mapping
+ * operation failed. Record the error and drop the packet.
+ */
+ txq->mapping_err++;
+ goto out_free;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /*
+ * After we're done injecting the Work Request for this
+		 * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now. The queue will get started later on when the
+ * firmware informs us that space has opened up.
+ */
+ txq_stop(txq);
+ }
+
+ /*
+ * Start filling in our Work Request. Note that we do _not_ handle
+ * the WR Header wrapping around the TX Descriptor Ring. If our
+ * maximum header size ever exceeds one TX Descriptor, we'll need to
+ * do something else here.
+ */
+ BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flits, 2)));
+ wr->r3[0] = cpu_to_be64(0);
+ wr->r3[1] = cpu_to_be64(0);
+ skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ end = (u64 *)wr + flits;
+
+ /*
+ * If this is a Large Send Offload packet we'll put in an LSO CPL
+ * message with an encapsulated TX Packet CPL message. Otherwise we
+ * just use a TX Packet CPL message.
+ */
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN(sizeof(*lso) +
+ sizeof(*cpl)));
+ /*
+ * Fill in the LSO CPL message.
+ */
+ lso->lso_ctrl =
+ cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE |
+ LSO_LAST_SLICE |
+ LSO_IPV6(v6) |
+ LSO_ETHHDR_LEN(eth_xtra_len/4) |
+ LSO_IPHDR_LEN(l3hdr_len/4) |
+ LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = cpu_to_be16(0);
+ lso->mss = cpu_to_be16(ssi->gso_size);
+ lso->seqno_offset = cpu_to_be32(0);
+ lso->len = cpu_to_be32(skb->len);
+
+ /*
+ * Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(lso + 1);
+ cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN(l3hdr_len) |
+ TXPKT_ETHHDR_LEN(eth_xtra_len));
+ txq->tso++;
+ txq->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN(len));
+
+ /*
+ * Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ txq->tx_cso++;
+ } else
+ cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ }
+
+ /*
+ * If there's a VLAN tag present, add that to the list of things to
+ * do in this Work Request.
+ */
+ if (vlan_tx_tag_present(skb)) {
+ txq->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ }
+
+ /*
+ * Fill in the TX Packet CPL message header.
+ */
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
+ TXPKT_INTF(pi->port_id) |
+ TXPKT_PF(0));
+ cpl->pack = cpu_to_be16(0);
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+#ifdef T4_TRACE
+ T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
+ "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
+ ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
+#endif
+
+ /*
+ * Fill in the body of the TX Packet CPL message with either in-lined
+ * data or a Scatter/Gather List.
+ */
+ if (is_eth_imm(skb)) {
+ /*
+ * In-line the packet's data and free the skb since we don't
+ * need it any longer.
+ */
+ inline_tx_skb(skb, &txq->q, cpl + 1);
+ dev_kfree_skb(skb);
+ } else {
+ /*
+ * Write the skb's Scatter/Gather list into the TX Packet CPL
+ * message and retain a pointer to the skb so we can free it
+ * later when its DMA completes. (We store the skb pointer
+ * in the Software Descriptor corresponding to the last TX
+ * Descriptor used by the Work Request.)
+ *
+ * The retained skb will be freed when the corresponding TX
+ * Descriptors are reclaimed after their DMAs complete.
+ * However, this could take quite a while since, in general,
+ * the hardware is set up to be lazy about sending DMA
+ * completion notifications to us and we mostly perform TX
+ * reclaims in the transmit routine.
+ *
+		 * This is good for performance but means that we rely on new
+ * TX packets arriving to run the destructors of completed
+ * packets, which open up space in their sockets' send queues.
+ * Sometimes we do not get such new packets causing TX to
+ * stall. A single UDP transmitter is a good example of this
+ * situation. We have a clean up timer that periodically
+ * reclaims completed packets but it doesn't run often enough
+ * (nor do we want it to) to prevent lengthy stalls. A
+ * solution to this problem is to run the destructor early,
+		 * after the packet is queued but before it is DMAed.  A downside is
+ * that we lie to socket memory accounting, but the amount of
+ * extra memory is reasonable (limited by the number of TX
+ * descriptors), the packets do actually get freed quickly by
+ * new packets almost always, and for protocols like TCP that
+ * wait for acks to really free up the data the extra memory
+ * is even less. On the positive side we run the destructors
+ * on the sending CPU rather than on a potentially different
+ * completing CPU, usually a good thing.
+ *
+ * Run the destructor before telling the DMA engine about the
+ * packet to make sure it doesn't complete and get freed
+ * prematurely.
+ */
+ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+ struct sge_txq *tq = &txq->q;
+ int last_desc;
+
+ /*
+ * If the Work Request header was an exact multiple of our TX
+ * Descriptor length, then it's possible that the starting SGL
+ * pointer lines up exactly with the end of our TX Descriptor
+ * ring. If that's the case, wrap around to the beginning
+ * here ...
+ */
+ if (unlikely((void *)sgl == (void *)tq->stat)) {
+ sgl = (void *)tq->desc;
+ end = (void *)((void *)tq->desc +
+ ((void *)end - (void *)tq->stat));
+ }
+
+ write_sgl(skb, tq, sgl, end, 0, addr);
+ skb_orphan(skb);
+
+ last_desc = tq->pidx + ndesc - 1;
+ if (last_desc >= tq->size)
+ last_desc -= tq->size;
+ tq->sdesc[last_desc].skb = skb;
+ tq->sdesc[last_desc].sgl = sgl;
+ }
+
+ /*
+ * Advance our internal TX Queue state, tell the hardware about
+ * the new TX descriptors and return success.
+ */
+ txq_advance(&txq->q, ndesc);
+ dev->trans_start = jiffies;
+ ring_tx_db(adapter, &txq->q, ndesc);
+ return NETDEV_TX_OK;
+
+out_free:
+ /*
+ * An error of some sort happened. Free the TX skb and tell the
+ * OS that we've "dealt" with the packet ...
+ */
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * t4vf_pktgl_free - free a packet gather list
+ * @gl: the gather list
+ *
+ * Releases the pages of a packet gather list. We do not own the last
+ * page on the list and do not free it.
+ */
+void t4vf_pktgl_free(const struct pkt_gl *gl)
+{
+ int frag;
+
+ frag = gl->nfrags - 1;
+ while (frag--)
+ put_page(gl->frags[frag].page);
+}
+
+/**
+ * copy_frags - copy fragments from gather list into skb_shared_info
+ * @si: destination skb shared info structure
+ * @gl: source internal packet gather list
+ * @offset: packet start offset in first page
+ *
+ * Copy an internal packet gather list into a Linux skb_shared_info
+ * structure.
+ */
+static inline void copy_frags(struct skb_shared_info *si,
+ const struct pkt_gl *gl,
+ unsigned int offset)
+{
+ unsigned int n;
+
+ /* usually there's just one frag */
+ si->frags[0].page = gl->frags[0].page;
+ si->frags[0].page_offset = gl->frags[0].page_offset + offset;
+ si->frags[0].size = gl->frags[0].size - offset;
+ si->nr_frags = gl->nfrags;
+
+ n = gl->nfrags - 1;
+ if (n)
+ memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[n].page);
+}
+
+/**
+ * do_gro - perform Generic Receive Offload ingress packet processing
+ * @rxq: ingress RX Ethernet Queue
+ * @gl: gather list for ingress packet
+ * @pkt: CPL header for last packet fragment
+ *
+ * Perform Generic Receive Offload (GRO) ingress packet processing.
+ * We use the standard Linux GRO interfaces for this.
+ */
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+ const struct cpl_rx_pkt *pkt)
+{
+ int ret;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(&rxq->rspq.napi);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return;
+ }
+
+ copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
+ skb->len = gl->tot_len - PKTSHIFT;
+ skb->data_len = skb->len;
+ skb->truesize += skb->data_len;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rxq->rspq.idx);
+
+ if (unlikely(pkt->vlan_ex)) {
+ struct port_info *pi = netdev_priv(rxq->rspq.netdev);
+ struct vlan_group *grp = pi->vlan_grp;
+
+ rxq->stats.vlan_ex++;
+ if (likely(grp)) {
+ ret = vlan_gro_frags(&rxq->rspq.napi, grp,
+ be16_to_cpu(pkt->vlan));
+ goto stats;
+ }
+ }
+ ret = napi_gro_frags(&rxq->rspq.napi);
+
+stats:
+ if (ret == GRO_HELD)
+ rxq->stats.lro_pkts++;
+ else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+ rxq->stats.lro_merged++;
+ rxq->stats.pkts++;
+ rxq->stats.rx_cso++;
+}
+
+/**
+ * t4vf_ethrx_handler - process an ingress ethernet packet
+ * @rspq: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @gl: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct sk_buff *skb;
+ struct port_info *pi;
+ struct skb_shared_info *ssi;
+ const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+ bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+ unsigned int len = be16_to_cpu(pkt->len);
+ struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+
+ /*
+ * If this is a good TCP packet and we have Generic Receive Offload
+ * enabled, handle the packet in the GRO path.
+ */
+ if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+ (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
+ !pkt->ip_frag) {
+ do_gro(rxq, gl, pkt);
+ return 0;
+ }
+
+ /*
+ * If the ingress packet is small enough, allocate an skb large enough
+ * for all of the data and copy it inline. Otherwise, allocate an skb
+ * with enough room to pull in the header and reference the rest of
+ * the data via the skb fragment list.
+ */
+ if (len <= RX_COPY_THRES) {
+ /* small packets have only one fragment */
+ skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
+ if (!skb)
+ goto nomem;
+ __skb_put(skb, gl->frags[0].size);
+ skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
+ } else {
+ skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
+ if (!skb)
+ goto nomem;
+ __skb_put(skb, RX_PKT_PULL_LEN);
+ skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = (gl->frags[0].page_offset +
+ RX_PKT_PULL_LEN);
+ ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags-1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+ skb->len = len + PKTSHIFT;
+ skb->data_len = skb->len - RX_PKT_PULL_LEN;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+
+ __skb_pull(skb, PKTSHIFT);
+ skb->protocol = eth_type_trans(skb, rspq->netdev);
+ skb_record_rx_queue(skb, rspq->idx);
+ skb->dev->last_rx = jiffies; /* XXX removed 2.6.29 */
+ pi = netdev_priv(skb->dev);
+ rxq->stats.pkts++;
+
+ if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec &&
+ (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (!pkt->ip_frag)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else {
+ __sum16 c = (__force __sum16)pkt->csum;
+ skb->csum = csum_unfold(c);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ rxq->stats.rx_cso++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (unlikely(pkt->vlan_ex)) {
+ struct vlan_group *grp = pi->vlan_grp;
+
+ rxq->stats.vlan_ex++;
+ if (likely(grp))
+ vlan_hwaccel_receive_skb(skb, grp,
+ be16_to_cpu(pkt->vlan));
+ else
+ dev_kfree_skb_any(skb);
+ } else
+ netif_receive_skb(skb);
+
+ return 0;
+
+nomem:
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return 0;
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @rc: the response control descriptor
+ * @rspq: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
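+ *
+ * Each response carries a generation bit written by the hardware; our
+ * software copy (@rspq->gen) is toggled every time we wrap the queue,
+ * so a matching generation means the entry was written since we last
+ * visited this slot.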
+ */
+static inline bool is_new_response(const struct rsp_ctrl *rc,
+ const struct sge_rspq *rspq)
+{
+ return RSPD_GEN(rc->type_gen) == rspq->gen;
+}
+
+/**
+ * restore_rx_bufs - put back a packet's RX buffers
+ * @gl: the packet gather list
+ * @fl: the SGE Free List
+ * @frags: how many fragments in @gl
+ *
+ * Called when we find out that the current packet, @gl, can't be
+ * processed right away for some reason. This is a very rare event and
+ * there's no effort to make this suspension/resumption process
+ * particularly efficient.
+ *
+ * We implement the suspension by putting all of the RX buffers associated
+ * with the current packet back on the original Free List. The buffers
+ * have already been unmapped and are left unmapped; we mark them as
+ * unmapped in order to prevent further unmapping attempts. (Effectively
+ * this function undoes the series of @unmap_rx_buf calls which were done
+ * to create the current packet's gather list.) This leaves us ready to
+ * restart processing of the packet the next time we start processing the
+ * RX Queue ...
+ */
+static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
+ int frags)
+{
+ struct rx_sw_desc *sdesc;
+
+ while (frags--) {
+ if (fl->cidx == 0)
+ fl->cidx = fl->size - 1;
+ else
+ fl->cidx--;
+ sdesc = &fl->sdesc[fl->cidx];
+ sdesc->page = gl->frags[frags].page;
+ sdesc->dma_addr |= RX_UNMAPPED_BUF;
+ fl->avail++;
+ }
+}
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @rspq: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *rspq)
+{
+ rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
+ if (unlikely(++rspq->cidx == rspq->size)) {
+ rspq->cidx = 0;
+ rspq->gen ^= 1;
+ rspq->cur_desc = rspq->desc;
+ }
+}
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @rspq: the ingress response queue to process
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from a Scatter Gather Engine response queue up to
+ * the supplied budget. Responses include received packets as well as
+ * control messages from firmware or hardware.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+int process_responses(struct sge_rspq *rspq, int budget)
+{
+ struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ int budget_left = budget;
+
+ while (likely(budget_left)) {
+ int ret, rsp_type;
+ const struct rsp_ctrl *rc;
+
+ rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, rspq))
+ break;
+
+ /*
+ * Figure out what kind of response we've received from the
+ * SGE.
+ */
+ rmb();
+ rsp_type = RSPD_TYPE(rc->type_gen);
+ if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ skb_frag_t *fp;
+ struct pkt_gl gl;
+ const struct rx_sw_desc *sdesc;
+ u32 bufsz, frag;
+ u32 len = be32_to_cpu(rc->pldbuflen_qid);
+
+ /*
+ * If we get a "new buffer" message from the SGE we
+ * need to move on to the next Free List buffer.
+ */
+ if (len & RSPD_NEWBUF) {
+ /*
+ * We get one "new buffer" message when we
+ * first start up a queue so we need to ignore
+ * it when our offset into the buffer is 0.
+ */
+ if (likely(rspq->offset > 0)) {
+ free_rx_bufs(rspq->adapter, &rxq->fl,
+ 1);
+ rspq->offset = 0;
+ }
+ len = RSPD_LEN(len);
+ }
+
+ /*
+ * Gather packet fragments.
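+			 * We only unmap the buffers we completely consume;
+			 * the final (current) buffer is left DMA-mapped and
+			 * is synced for CPU access further below.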
+ */
+ for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
+ BUG_ON(frag >= MAX_SKB_FRAGS);
+ BUG_ON(rxq->fl.avail == 0);
+ sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
+ bufsz = get_buf_size(sdesc);
+ fp->page = sdesc->page;
+ fp->page_offset = rspq->offset;
+ fp->size = min(bufsz, len);
+ len -= fp->size;
+ if (!len)
+ break;
+ unmap_rx_buf(rspq->adapter, &rxq->fl);
+ }
+ gl.nfrags = frag+1;
+
+ /*
+ * Last buffer remains mapped so explicitly make it
+ * coherent for CPU access and start preloading first
+ * cache line ...
+ */
+ dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
+ get_buf_addr(sdesc),
+ fp->size, DMA_FROM_DEVICE);
+ gl.va = (page_address(gl.frags[0].page) +
+ gl.frags[0].page_offset);
+ prefetch(gl.va);
+
+ /*
+ * Hand the new ingress packet to the handler for
+ * this Response Queue.
+ */
+ ret = rspq->handler(rspq, rspq->cur_desc, &gl);
+ if (likely(ret == 0))
+ rspq->offset += ALIGN(fp->size, FL_ALIGN);
+ else
+ restore_rx_bufs(&gl, &rxq->fl, frag);
+ } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ ret = rspq->handler(rspq, rspq->cur_desc, NULL);
+ } else {
+ WARN_ON(rsp_type > RSP_TYPE_CPL);
+ ret = 0;
+ }
+
+ if (unlikely(ret)) {
+ /*
+ * Couldn't process descriptor, back off for recovery.
+ * We use the SGE's last timer which has the longest
+ * interrupt coalescing value ...
+ */
+ const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
+ rspq->next_intr_params =
+ QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+ break;
+ }
+
+ rspq_next(rspq);
+ budget_left--;
+ }
+
+ /*
+ * If this is a Response Queue with an associated Free List and
+ * at least two Egress Queue units available in the Free List
+ * for new buffer pointers, refill the Free List.
+ */
+ if (rspq->offset >= 0 &&
+ rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
+ __refill_fl(rspq->adapter, &rxq->fl);
+ return budget - budget_left;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for RX processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI. This does not need any
+ * locking or protection from interrupts as data interrupts are off at
+ * this point and other adapter interrupts do not interfere (the latter
+ * in not a concern at all with MSI-X as non-data interrupts then have
+ * a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ unsigned int intr_params;
+ struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
+ int work_done = process_responses(rspq, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ intr_params = rspq->next_intr_params;
+ rspq->next_intr_params = rspq->intr_params;
+ } else
+ intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+
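+	/*
+	 * Acknowledge the responses we've handled and hand the SGE the
+	 * interrupt holdoff parameters to use for this queue's next
+	 * interrupt.
+	 */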
+ t4_write_reg(rspq->adapter,
+ T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(work_done) |
+ INGRESSQID((u32)rspq->cntxt_id) |
+ SEINTARM(intr_params));
+ return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_rspq *rspq = cookie;
+
+ napi_schedule(&rspq->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ struct sge_rspq *intrq = &s->intrq;
+ unsigned int work_done;
+
+ spin_lock(&adapter->sge.intrq_lock);
+ for (work_done = 0; ; work_done++) {
+ const struct rsp_ctrl *rc;
+ unsigned int qid, iq_idx;
+ struct sge_rspq *rspq;
+
+ /*
+ * Grab the next response from the interrupt queue and bail
+ * out if it's not a new response.
+ */
+ rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, intrq))
+ break;
+
+ /*
+		 * If the response isn't a forwarded interrupt message, issue an
+		 * error and go on to the next response message.  This should
+ * never happen ...
+ */
+ rmb();
+ if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+ dev_err(adapter->pdev_dev,
+ "Unexpected INTRQ response type %d\n",
+ RSPD_TYPE(rc->type_gen));
+ continue;
+ }
+
+ /*
+ * Extract the Queue ID from the interrupt message and perform
+ * sanity checking to make sure it really refers to one of our
+ * Ingress Queues which is active and matches the queue's ID.
+ * None of these error conditions should ever happen so we may
+ * want to either make them fatal and/or conditionalized under
+ * DEBUG.
+ */
+ qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+ iq_idx = IQ_IDX(s, qid);
+ if (unlikely(iq_idx >= MAX_INGQ)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d out of range\n", qid);
+ continue;
+ }
+ rspq = s->ingr_map[iq_idx];
+ if (unlikely(rspq == NULL)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d RSPQ=NULL\n", qid);
+ continue;
+ }
+ if (unlikely(rspq->abs_id != qid)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d refers to RSPQ %d\n",
+ qid, rspq->abs_id);
+ continue;
+ }
+
+ /*
+ * Schedule NAPI processing on the indicated Response Queue
+ * and move on to the next entry in the Forwarded Interrupt
+ * Queue.
+ */
+ napi_schedule(&rspq->napi);
+ rspq_next(intrq);
+ }
+
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(work_done) |
+ INGRESSQID(intrq->cntxt_id) |
+ SEINTARM(intrq->intr_params));
+
+ spin_unlock(&adapter->sge.intrq_lock);
+
+ return work_done;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events as they all use the same MSI vector.
+ */
+irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+{
+ struct adapter *adapter = cookie;
+
+ process_intrq(adapter);
+ return IRQ_HANDLED;
+}
+
+/**
+ * t4vf_intr_handler - select the top-level interrupt handler
+ * @adapter: the adapter
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X or MSI).
+ */
+irq_handler_t t4vf_intr_handler(struct adapter *adapter)
+{
+ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+ if (adapter->flags & USING_MSIX)
+ return t4vf_sge_intr_msix;
+ else
+ return t4vf_intr_msi;
+}
+
+/**
+ * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
+ * @data: the adapter
+ *
+ * Runs periodically from a timer to perform maintenance of SGE RX queues.
+ *
+ * Replenishes RX queues that have run out due to memory shortage.
+ * Normally new RX buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty. We schedule NAPI to do
+ * the actual refill.
+ */
+static void sge_rx_timer_cb(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *s = &adapter->sge;
+ unsigned int i;
+
+ /*
+ * Scan the "Starving Free Lists" flag array looking for any Free
+ * Lists in need of more free buffers. If we find one and it's not
+ * being actively polled, then bump its "starving" counter and attempt
+ * to refill it. If we're successful in adding enough buffers to push
+ * the Free List over the starving threshold, then we can clear its
+ * "starving" status.
+ */
+ for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
+ unsigned long m;
+
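+		/*
+		 * Walk the set bits in this word of the bitmap; the
+		 * "m &= m - 1" step clears the lowest set bit each time
+		 * around the loop.
+		 */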
+ for (m = s->starving_fl[i]; m; m &= m - 1) {
+ unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_fl *fl = s->egr_map[id];
+
+ clear_bit(id, s->starving_fl);
+ smp_mb__after_clear_bit();
+
+ /*
+ * Since we are accessing fl without a lock there's a
+ * small probability of a false positive where we
+ * schedule napi but the FL is no longer starving.
+ * No biggie.
+ */
+ if (fl_starving(fl)) {
+ struct sge_eth_rxq *rxq;
+
+ rxq = container_of(fl, struct sge_eth_rxq, fl);
+ if (napi_reschedule(&rxq->rspq.napi))
+ fl->starving++;
+ else
+ set_bit(id, s->starving_fl);
+ }
+ }
+ }
+
+ /*
+ * Reschedule the next scan for starving Free Lists ...
+ */
+ mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+/**
+ * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
+ * @data: the adapter
+ *
+ * Runs periodically from a timer to perform maintenance of SGE TX queues.
+ *
+ * Reclaims completed Tx packets for the Ethernet queues.  Normally
+ * packets are cleaned up by new Tx packets, this timer cleans up packets
+ * when no new packets are being submitted. This is essential for pktgen,
+ * at least.
+ */
+static void sge_tx_timer_cb(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *s = &adapter->sge;
+ unsigned int i, budget;
+
+ budget = MAX_TIMER_TX_RECLAIM;
+ i = s->ethtxq_rover;
+ do {
+ struct sge_eth_txq *txq = &s->ethtxq[i];
+
+ if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
+ int avail = reclaimable(&txq->q);
+
+ if (avail > budget)
+ avail = budget;
+
+ free_tx_desc(adapter, &txq->q, avail, true);
+ txq->q.in_use -= avail;
+ __netif_tx_unlock(txq->txq);
+
+ budget -= avail;
+ if (!budget)
+ break;
+ }
+
+ i++;
+ if (i >= s->ethqsets)
+ i = 0;
+ } while (i != s->ethtxq_rover);
+ s->ethtxq_rover = i;
+
+ /*
+ * If we found too many reclaimable packets schedule a timer in the
+ * near future to continue where we left off. Otherwise the next timer
+ * will be at its normal interval.
+ */
+ mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+/**
+ * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
+ * @adapter: the adapter
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
+ * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
+ * @dev: the network device associated with the new rspq
+ * @intr_dest: MSI-X vector index (overridden in MSI mode)
+ * @fl: pointer to the new rxq's Free List to be filled in
+ * @hnd: the interrupt handler to invoke for the rspq
+ */
+int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
+ bool iqasynch, struct net_device *dev,
+ int intr_dest,
+ struct sge_fl *fl, rspq_handler_t hnd)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct fw_iq_cmd cmd, rpl;
+ int ret, iqandst, flsz = 0;
+
+ /*
+ * If we're using MSI interrupts and we're not initializing the
+ * Forwarded Interrupt Queue itself, then set up this queue for
+ * indirect interrupts to the Forwarded Interrupt Queue. Obviously
+ * the Forwarded Interrupt Queue must be set up before any other
+ * ingress queue ...
+ */
+ if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
+ iqandst = SGE_INTRDST_IQ;
+ intr_dest = adapter->sge.intrq.abs_id;
+ } else
+ iqandst = SGE_INTRDST_PCI;
+
+ /*
+ * Allocate the hardware ring for the Response Queue. The size needs
+ * to be a multiple of 16 which includes the mandatory status entry
+ * (regardless of whether the Status Page capabilities are enabled or
+ * not).
+ */
+ rspq->size = roundup(rspq->size, 16);
+ rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
+ 0, &rspq->phys_addr, NULL, 0);
+ if (!rspq->desc)
+ return -ENOMEM;
+
+ /*
+ * Fill in the Ingress Queue Command. Note: Ideally this code would
+ * be in t4vf_hw.c but there are so many parameters and dependencies
+ * on our Linux SGE state that we would end up having to pass tons of
+ * parameters. We'll have to think about how this might be migrated
+ * into OS-independent common code ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
+ FW_IQ_CMD_IQSTART(1) |
+ FW_LEN16(cmd));
+ cmd.type_to_iqandstindex =
+ cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH(iqasynch) |
+ FW_IQ_CMD_VIID(pi->viid) |
+ FW_IQ_CMD_IQANDST(iqandst) |
+ FW_IQ_CMD_IQANUS(1) |
+ FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
+ FW_IQ_CMD_IQANDSTINDEX(intr_dest));
+ cmd.iqdroprss_to_iqesize =
+ cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
+ FW_IQ_CMD_IQGTSMODE |
+ FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
+ cmd.iqsize = cpu_to_be16(rspq->size);
+ cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
+
+ if (fl) {
+ /*
+ * Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit.
+ */
+ fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
+ fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
+ sizeof(__be64), sizeof(struct rx_sw_desc),
+ &fl->addr, &fl->sdesc, STAT_LEN);
+ if (!fl->desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Calculate the size of the hardware free list ring plus
+ * status page (which the SGE will place at the end of the
+ * free list ring) in Egress Queue Units.
+ */
+ flsz = (fl->size / FL_PER_EQ_UNIT +
+ STAT_LEN / EQ_UNIT);
+
+ /*
+ * Fill in all the relevant firmware Ingress Queue Command
+ * fields for the free list.
+ */
+ cmd.iqns_to_fl0congen =
+ cpu_to_be32(
+ FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
+ FW_IQ_CMD_FL0PACKEN |
+ FW_IQ_CMD_FL0PADEN);
+ cmd.fl0dcaen_to_fl0cidxfthresh =
+ cpu_to_be16(
+ FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
+ FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
+ cmd.fl0size = cpu_to_be16(flsz);
+ cmd.fl0addr = cpu_to_be64(fl->addr);
+ }
+
+ /*
+ * Issue the firmware Ingress Queue Command and extract the results if
+ * it completes successfully.
+ */
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret)
+ goto err;
+
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ rspq->cur_desc = rspq->desc;
+ rspq->cidx = 0;
+ rspq->gen = 1;
+ rspq->next_intr_params = rspq->intr_params;
+ rspq->cntxt_id = be16_to_cpu(rpl.iqid);
+ rspq->abs_id = be16_to_cpu(rpl.physiqid);
+ rspq->size--; /* subtract status entry */
+ rspq->adapter = adapter;
+ rspq->netdev = dev;
+ rspq->handler = hnd;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ rspq->offset = fl ? 0 : -1;
+
+ if (fl) {
+ fl->cntxt_id = be16_to_cpu(rpl.fl0id);
+ fl->avail = 0;
+ fl->pend_cred = 0;
+ fl->pidx = 0;
+ fl->cidx = 0;
+ fl->alloc_failed = 0;
+ fl->large_alloc_failed = 0;
+ fl->starving = 0;
+ refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
+ }
+
+ return 0;
+
+err:
+ /*
+ * An error occurred. Clean up our partial allocation state and
+ * return the error.
+ */
+ if (rspq->desc) {
+ dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
+ rspq->desc, rspq->phys_addr);
+ rspq->desc = NULL;
+ }
+ if (fl && fl->desc) {
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
+ fl->desc, fl->addr);
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+/**
+ * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
+ * @adapter: the adapter
+ * @txq: pointer to the new txq to be filled in
+ * @dev: the network device owning the new txq
+ * @devq: the network TX queue associated with the new txq
+ * @iqid: the relative ingress queue ID to which events relating to
+ * the new txq should be directed
+ */
+int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *devq,
+ unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd cmd, rpl;
+ struct port_info *pi = netdev_priv(dev);
+
+ /*
+ * Calculate the size of the hardware TX Queue (including the
+	 * status page at the end) in units of TX Descriptors.
+ */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ /*
+ * Allocate the hardware ring for the TX ring (with space for its
+ * status page) along with the associated software descriptor ring.
+ */
+ txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ /*
+ * Fill in the Egress Queue Command. Note: As with the direct use of
+	 * the firmware Ingress Queue Command above in our RXQ allocation
+ * routine, ideally, this code would be in t4vf_hw.c. Again, we'll
+ * have to see if there's some reasonable way to parameterize it
+ * into the common code ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
+ FW_EQ_ETH_CMD_EQSTART |
+ FW_LEN16(cmd));
+ cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
+ cmd.fetchszm_to_iqid =
+ cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
+ FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
+ FW_EQ_ETH_CMD_IQID(iqid));
+ cmd.dcaen_to_eqsize =
+ cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
+ FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
+ FW_EQ_ETH_CMD_EQSIZE(nentries));
+ cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ /*
+ * Issue the firmware Egress Queue Command and extract the results if
+ * it completes successfully.
+ */
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret) {
+ /*
+		 * The firmware Egress Queue Command failed for some reason.
+ * Free up our partial allocation state and return the error.
+ */
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adapter->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ txq->q.in_use = 0;
+ txq->q.cidx = 0;
+ txq->q.pidx = 0;
+ txq->q.stat = (void *)&txq->q.desc[txq->q.size];
+ txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
+ txq->q.abs_id =
+ FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
+ txq->txq = devq;
+ txq->tso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
+ txq->q.stops = 0;
+ txq->q.restarts = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+/*
+ * Free the DMA map resources associated with a TX queue.
+ */
+static void free_txq(struct adapter *adapter, struct sge_txq *tq)
+{
+ dma_free_coherent(adapter->pdev_dev,
+ tq->size * sizeof(*tq->desc) + STAT_LEN,
+ tq->desc, tq->phys_addr);
+ tq->cntxt_id = 0;
+ tq->sdesc = NULL;
+ tq->desc = NULL;
+}
+
+/*
+ * Free the resources associated with a response queue (possibly including a
+ * free list).
+ */
+static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
+ struct sge_fl *fl)
+{
+ unsigned int flid = fl ? fl->cntxt_id : 0xffff;
+
+ t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
+ rspq->cntxt_id, flid, 0xffff);
+ dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
+ rspq->desc, rspq->phys_addr);
+ netif_napi_del(&rspq->napi);
+ rspq->netdev = NULL;
+ rspq->cntxt_id = 0;
+ rspq->abs_id = 0;
+ rspq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(adapter, fl, fl->avail);
+ dma_free_coherent(adapter->pdev_dev,
+ fl->size * sizeof(*fl->desc) + STAT_LEN,
+ fl->desc, fl->addr);
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/**
+ * t4vf_free_sge_resources - free SGE resources
+ * @adapter: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4vf_free_sge_resources(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ struct sge_eth_rxq *rxq = s->ethrxq;
+ struct sge_eth_txq *txq = s->ethtxq;
+ struct sge_rspq *evtq = &s->fw_evtq;
+ struct sge_rspq *intrq = &s->intrq;
+ int qs;
+
+ for (qs = 0; qs < adapter->sge.ethqsets; qs++) {
+ if (rxq->rspq.desc)
+ free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
+ if (txq->q.desc) {
+ t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
+ free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
+ kfree(txq->q.sdesc);
+ free_txq(adapter, &txq->q);
+ }
+ }
+ if (evtq->desc)
+ free_rspq_fl(adapter, evtq, NULL);
+ if (intrq->desc)
+ free_rspq_fl(adapter, intrq, NULL);
+}
+
+/**
+ * t4vf_sge_start - enable SGE operation
+ * @adapter: the adapter
+ *
+ * Start tasklets and timers associated with the DMA engine.
+ */
+void t4vf_sge_start(struct adapter *adapter)
+{
+ adapter->sge.ethtxq_rover = 0;
+ mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+ mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ * t4vf_sge_stop - disable SGE operation
+ * @adapter: the adapter
+ *
+ * Stop tasklets and timers associated with the DMA engine. Note that
+ * this is effective only if measures have been taken to disable any HW
+ * events that may restart them.
+ */
+void t4vf_sge_stop(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+
+ if (s->rx_timer.function)
+ del_timer_sync(&s->rx_timer);
+ if (s->tx_timer.function)
+ del_timer_sync(&s->tx_timer);
+}
+
+/**
+ * t4vf_sge_init - initialize SGE
+ * @adapter: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here, instead the driver
+ * top-level must request those individually. We also do not enable DMA
+ * here, that should be done after the queues have been set up.
+ */
+int t4vf_sge_init(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 fl0 = sge_params->sge_fl_buffer_size[0];
+ u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ struct sge *s = &adapter->sge;
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver. Ideally we should be able to deal
+ * with _any_ configuration. Practice is different ...
+ */
+ if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+ dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+ fl0, fl1);
+ return -EINVAL;
+ }
+ if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+ dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now translate the adapter parameters into our internal forms.
+ */
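+	/*
+	 * A non-zero second Free List buffer size gives us our "large page"
+	 * order; fl1 is expected to be a power-of-two multiple of PAGE_SIZE.
+	 */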
+ if (fl1)
+ FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
+ STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+ PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
+ FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+ SGE_INGPADBOUNDARY_SHIFT);
+
+ /*
+ * Set up tasklet timers.
+ */
+ setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
+ setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
+
+ /*
+ * Initialize Forwarded Interrupt Queue lock.
+ */
+ spin_lock_init(&s->intrq_lock);
+
+ return 0;
+}
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
new file mode 100644
index 0000000..5c7bde7
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -0,0 +1,273 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_COMMON_H__
+#define __T4VF_COMMON_H__
+
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * The "len16" field of a Firmware Command Structure ...
+ */
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
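
As a quick illustration (not part of the patch):

/*
 * FW_LEN16() on a 64-byte command structure evaluates to
 * FW_CMD_LEN16(64 / 16) == FW_CMD_LEN16(4), i.e. four 16-byte units.
 */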
+
+/*
+ * Per-VF statistics.
+ */
+struct t4vf_port_stats {
+ /*
+ * TX statistics.
+ */
+ u64 tx_bcast_bytes; /* broadcast */
+ u64 tx_bcast_frames;
+ u64 tx_mcast_bytes; /* multicast */
+ u64 tx_mcast_frames;
+ u64 tx_ucast_bytes; /* unicast */
+ u64 tx_ucast_frames;
+ u64 tx_drop_frames; /* TX dropped frames */
+ u64 tx_offload_bytes; /* offload */
+ u64 tx_offload_frames;
+
+ /*
+ * RX statistics.
+ */
+ u64 rx_bcast_bytes; /* broadcast */
+ u64 rx_bcast_frames;
+ u64 rx_mcast_bytes; /* multicast */
+ u64 rx_mcast_frames;
+ u64 rx_ucast_bytes;
+ u64 rx_ucast_frames; /* unicast */
+
+ u64 rx_err_frames; /* RX error frames */
+};
+
+/*
+ * Per-"port" (Virtual Interface) link configuration ...
+ */
+struct link_config {
+ unsigned int supported; /* link capabilities */
+ unsigned int advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned char link_ok; /* link up? */
+};
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+/*
+ * General device parameters ...
+ */
+struct dev_params {
+ u32 fwrev; /* firmware version */
+ u32 tprev; /* TP Microcode Version */
+};
+
+/*
+ * Scatter Gather Engine parameters. These are almost all determined by the
+ * Physical Function Driver. We just need to grab them to see within which
+ * environment we're playing ...
+ */
+struct sge_params {
+ u32 sge_control; /* padding, boundaries, lengths, etc. */
+ u32 sge_host_page_size; /* RDMA page sizes */
+ u32 sge_queues_per_page; /* RDMA queues/page */
+ u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
+ u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
+ u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
+ u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
+ u32 sge_timer_value_2_and_3;
+ u32 sge_timer_value_4_and_5;
+};
+
+/*
+ * Vital Product Data parameters.
+ */
+struct vpd_params {
+ u32 cclk; /* Core Clock (KHz) */
+};
+
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+ unsigned int mode; /* RSS mode */
+ union {
+ struct {
+ int synmapen:1; /* SYN Map Enable */
+ int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */
+ int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */
+ int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */
+ int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */
+ int ofdmapen:1; /* Offload Map Enable */
+ int tnlmapen:1; /* Tunnel Map Enable */
+ int tnlalllookup:1; /* Tunnel All Lookup */
+ int hashtoeplitz:1; /* use Toeplitz hash */
+ } basicvirtual;
+ } u;
+};
+
+/*
+ * Virtual Interface RSS Configuration in host-native format.
+ */
+union rss_vi_config {
+ struct {
+ u16 defaultq; /* Ingress Queue ID for !tnlalllookup */
+ int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */
+ int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */
+ int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */
+ int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */
+ int udpen; /* hash 4-tuple UDP ingress packets */
+ } basicvirtual;
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
+/*
+ * Per-"adapter" (Virtual Function) parameters.
+ */
+struct adapter_params {
+ struct dev_params dev; /* general device parameters */
+ struct sge_params sge; /* Scatter Gather Engine */
+ struct vpd_params vpd; /* Vital Product Data */
+ struct rss_params rss; /* Receive Side Scaling */
+ struct vf_resources vfres; /* Virtual Function Resource limits */
+ u8 nports; /* # of Ethernet "ports" */
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; iter++)
+
+static inline bool is_10g_port(const struct link_config *lc)
+{
+ return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
+{
+ return adapter->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adapter,
+ unsigned int us)
+{
+ return (us * adapter->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+ unsigned int ticks)
+{
+ return (ticks * 1000) / adapter->params.vpd.cclk;
+}
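
A quick numeric check of these helpers, assuming (for illustration only) a
250 MHz core clock, i.e. cclk == 250000 KHz:

/*
 * Illustrative only:
 *
 *   core_ticks_per_usec(adap)     = 250000 / 1000          = 250
 *   us_to_core_ticks(adap, 4)     = (4 * 250000) / 1000    = 1000 ticks
 *   core_ticks_to_us(adap, 1000)  = (1000 * 1000) / 250000 = 4 us
 */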
+
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
+
+int __devinit t4vf_wait_dev_ready(struct adapter *);
+int __devinit t4vf_port_init(struct adapter *, int);
+
+int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
+int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
+
+int t4vf_get_sge_params(struct adapter *);
+int t4vf_get_vpd_params(struct adapter *);
+int t4vf_get_dev_params(struct adapter *);
+int t4vf_get_rss_glb_config(struct adapter *);
+int t4vf_get_vfres(struct adapter *);
+
+int t4vf_read_rss_vi_config(struct adapter *, unsigned int,
+ union rss_vi_config *);
+int t4vf_write_rss_vi_config(struct adapter *, unsigned int,
+ union rss_vi_config *);
+int t4vf_config_rss_range(struct adapter *, unsigned int, int, int,
+ const u16 *, int);
+
+int t4vf_alloc_vi(struct adapter *, int);
+int t4vf_free_vi(struct adapter *, int);
+int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool);
+int t4vf_identify_port(struct adapter *, unsigned int, unsigned int);
+
+int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
+ bool);
+int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int,
+ const u8 **, u16 *, u64 *, bool);
+int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool);
+int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool);
+int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *);
+
+int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
+ unsigned int);
+int t4vf_eth_eq_free(struct adapter *, unsigned int);
+
+int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
+
+#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/cxgb4vf/t4vf_defs.h b/drivers/net/cxgb4vf/t4vf_defs.h
new file mode 100644
index 0000000..c7b127d
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_defs.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_DEFS_H__
+#define __T4VF_DEFS_H__
+
+#include "../cxgb4/t4_regs.h"
+
+/*
+ * The VF Register Map.
+ *
+ * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local
+ * bus module (PL) and CPU Interface Module (CIM) components are mapped via
+ * the Slice to Module Map Table (see below) in the Physical Function Register
+ * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base
+ * and Offset registers in the PF Register Map. The MBDATA base address is
+ * quite constrained as it determines the Mailbox Data addresses for both PFs
+ * and VFs, and therefore must fit in both the VF and PF Register Maps without
+ * overlapping other registers.
+ */
+#define T4VF_SGE_BASE_ADDR 0x0000
+#define T4VF_MPS_BASE_ADDR 0x0100
+#define T4VF_PL_BASE_ADDR 0x0200
+#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T4VF_CIM_BASE_ADDR 0x0300
+
+#define T4VF_REGMAP_START 0x0000
+#define T4VF_REGMAP_SIZE 0x0400
+
+/*
+ * There's no hardware limitation which requires that the addresses of the
+ * Mailbox Data in the fixed CIM PF map and the programmable VF map must
+ * match. However, it's a useful convention ...
+ */
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#endif
+
+/*
+ * Virtual Function "Slice to Module Map Table" definitions.
+ *
+ * This table allows us to map subsets of the various module register sets
+ * into the T4VF Register Map. Each table entry identifies the index of the
+ * module whose registers are being mapped, the offset within the module's
+ * register set that the mapping should start at, the limit of the mapping,
+ * and the offset within the T4VF Register Map to which the module's registers
+ * are being mapped. All addresses and quantities are in terms of 32-bit
+ * words. The "limit" value is also in terms of 32-bit words and is equal to
+ * the last address mapped in the T4VF Register Map (i.e. it's a "<="
+ * relation rather than a "<").
+ */
+#define T4VF_MOD_MAP(module, index, first, last) \
+ T4VF_MOD_MAP_##module##_INDEX = (index), \
+ T4VF_MOD_MAP_##module##_FIRST = (first), \
+ T4VF_MOD_MAP_##module##_LAST = (last), \
+ T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
+ T4VF_MOD_MAP_##module##_BASE = \
+ (T4VF_##module##_BASE_ADDR/4 + (first)/4), \
+ T4VF_MOD_MAP_##module##_LIMIT = \
+ (T4VF_##module##_BASE_ADDR/4 + (last)/4),
+
+#define SGE_VF_KDOORBELL 0x0
+#define SGE_VF_GTS 0x4
+#define MPS_VF_CTL 0x0
+#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define PL_VF_WHOAMI 0x0
+#define CIM_VF_EXT_MAILBOX_CTRL 0x0
+#define CIM_VF_EXT_MAILBOX_STATUS 0x4
+
+enum {
+ T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
+ T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
+ T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI)
+ T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
+};
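
To make the macro concrete, the SGE entry above expands (after constant
folding) roughly as follows; the expansion is shown for exposition only:

/*
 * T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS) produces:
 *
 *   T4VF_MOD_MAP_SGE_INDEX  = 2
 *   T4VF_MOD_MAP_SGE_FIRST  = 0x0
 *   T4VF_MOD_MAP_SGE_LAST   = 0x4
 *   T4VF_MOD_MAP_SGE_OFFSET = 0x0/4            = 0
 *   T4VF_MOD_MAP_SGE_BASE   = 0x0000/4 + 0x0/4 = 0
 *   T4VF_MOD_MAP_SGE_LIMIT  = 0x0000/4 + 0x4/4 = 1
 *
 * i.e. the SGE slice occupies 32-bit words 0..1 of the VF Register Map
 * (byte offsets 0x0 and 0x4), with the "<=" limit convention described
 * above including word 1.
 */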
+
+/*
+ * There isn't a Slice to Module Map Table entry for the Mailbox Data
+ * registers, but it's convenient to use similar names as above. There are 8
+ * little-endian 64-bit Mailbox Data registers. Note that the "instances"
+ * value below is in terms of 32-bit words which matches the "word" addressing
+ * space we use above for the Slice to Module Map Space.
+ */
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16
+
+#define T4VF_MBDATA_FIRST 0
+#define T4VF_MBDATA_LAST ((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
+
+#endif /* __T4VF_DEFS_H__ */
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
new file mode 100644
index 0000000..1ef2528
--- /dev/null
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -0,0 +1,1333 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/pci.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's). Return an error if it doesn't
+ * become ready ...
+ */
+int __devinit t4vf_wait_dev_ready(struct adapter *adapter)
+{
+ const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
+ const u32 notready1 = 0xffffffff;
+ const u32 notready2 = 0xeeeeeeee;
+ u32 val;
+
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+ msleep(500);
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+ else
+ return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order
+ * (since the firmware data structures are specified in a big-endian layout).
+ */
+static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
+ u32 mbox_data)
+{
+ for ( ; size; size -= 8, mbox_data += 8)
+ *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
+}
+
+/*
+ * Dump contents of mailbox with a leading tag.
+ */
+static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+{
+ dev_err(adapter->pdev_dev,
+ "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command. If @rpl is not %NULL it is used to store
+ * the FW's reply to the command. The command and its optional reply
+ * are of the same length. FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff; otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ static int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+ u32 v;
+ int i, ms, delay_idx;
+ const __be64 *p;
+ u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
+ u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+
+ /*
+ * Commands must be multiples of 16 bytes in length and may not be
+ * larger than the size of the Mailbox Data register array.
+ */
+ if ((size % 16) != 0 ||
+ size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+ return -EINVAL;
+
+ /*
+ * Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
+ v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+ for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+ v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+ if (v != MBOX_OWNER_DRV)
+ return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+
+ /*
+ * Write the command array into the Mailbox Data register array and
+ * transfer ownership of the mailbox to the firmware.
+ */
+ for (i = 0, p = cmd; i < size; i += 8)
+ t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+ t4_write_reg(adapter, mbox_ctl,
+ MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+ t4_read_reg(adapter, mbox_ctl); /* flush write */
+
+ /*
+ * Spin waiting for firmware to acknowledge processing our command.
+ */
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; i < 500; i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx];
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else
+ mdelay(ms);
+
+ /*
+ * If we're the owner, see if this is the reply we wanted.
+ */
+ v = t4_read_reg(adapter, mbox_ctl);
+ if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+ /*
+ * If the Message Valid bit isn't on, revoke ownership
+ * of the mailbox and continue waiting for our reply.
+ */
+ if ((v & MBMSGVALID) == 0) {
+ t4_write_reg(adapter, mbox_ctl,
+ MBOWNER(MBOX_OWNER_NONE));
+ continue;
+ }
+
+ /*
+ * We now have our reply. Extract the command return
+ * value, copy the reply back to our caller's buffer
+ * (if specified) and revoke ownership of the mailbox.
+ * We return the (negated) firmware command return
+ * code (this depends on FW_SUCCESS == 0).
+ */
+
+ /* return value in low-order little-endian word */
+ v = t4_read_reg(adapter, mbox_data);
+ if (FW_CMD_RETVAL_GET(v))
+ dump_mbox(adapter, "FW Error", mbox_data);
+
+ if (rpl) {
+ /* request bit in high-order BE word */
+ WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ & FW_CMD_REQUEST) == 0);
+ get_mbox_rpl(adapter, rpl, size, mbox_data);
+ WARN_ON((be32_to_cpu(*(u32 *)rpl)
+ & FW_CMD_REQUEST) != 0);
+ }
+ t4_write_reg(adapter, mbox_ctl,
+ MBOWNER(MBOX_OWNER_NONE));
+ return -FW_CMD_RETVAL_GET(v);
+ }
+ }
+
+ /*
+ * We timed out. Return the error ...
+ */
+ dump_mbox(adapter, "FW Timeout", mbox_data);
+ return -ETIMEDOUT;
+}
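
The concrete command helpers later in this file all follow the same caller
pattern; a minimal sketch (using the VI Enable command purely as an example,
with "adapter" and "viid" supplied by the caller's context) looks like this:

/* Sketch only -- mirrors t4vf_enable_vi() below. */
struct fw_vi_enable_cmd cmd;
int ret;

memset(&cmd, 0, sizeof(cmd));			/* build big-endian command */
cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
			     FW_CMD_REQUEST |
			     FW_CMD_EXEC |
			     FW_VI_ENABLE_CMD_VIID(viid));
cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(1) |
			       FW_VI_ENABLE_CMD_EEN(1) |
			       FW_LEN16(cmd));		/* length in 16B units */
ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);	/* no reply wanted */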
+
+/**
+ * hash_mac_addr - return the hash value of a MAC address
+ * @addr: the 48-bit Ethernet MAC address
+ *
+ * Hashes a MAC address according to the hash function used by hardware
+ * inexact (hash) address matching.
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+ u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+ u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+ a ^= b;
+ a ^= (a >> 12);
+ a ^= (a >> 6);
+ return a & 0x3f;
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising = lc->supported;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * t4vf_port_init - initialize port hardware/software state
+ * @adapter: the adapter
+ * @pidx: the adapter port index
+ */
+int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ struct fw_vi_cmd vi_cmd, vi_rpl;
+ struct fw_port_cmd port_cmd, port_rpl;
+ int v;
+ u32 word;
+
+ /*
+ * Execute a VI Read command to get our Virtual Interface information
+ * like MAC address, etc.
+ */
+ memset(&vi_cmd, 0, sizeof(vi_cmd));
+ vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+ vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid));
+ v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+ if (v)
+ return v;
+
+ BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd));
+ pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd));
+ t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
+
+ /*
+ * If we don't have read access to our port information, we're done
+ * now. Otherwise, execute a PORT Read command to get it ...
+ */
+ if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+ return 0;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_PORT_CMD_PORTID(pi->port_id));
+ port_cmd.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(port_cmd));
+ v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
+ if (v)
+ return v;
+
+ v = 0;
+ word = be16_to_cpu(port_rpl.u.info.pcap);
+ if (word & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (word & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (word & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ if (word & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ init_link_config(&pi->link_cfg, v);
+
+ return 0;
+}
+
+/**
+ * t4vf_query_params - query FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Reads the values of firmware or device parameters. Up to 7 parameters
+ * can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
+{
+ int i, ret;
+ struct fw_params_cmd cmd, rpl;
+ struct fw_params_param *p;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams].mnem), 16);
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+ p->mnem = htonl(*params++);
+
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0)
+ for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+ *vals++ = be32_to_cpu(p->val);
+ return ret;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters. Up to 7 parameters
+ * can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals)
+{
+ int i;
+ struct fw_params_cmd cmd;
+ struct fw_params_param *p;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+ p->mnem = cpu_to_be32(*params++);
+ p->val = cpu_to_be32(*vals++);
+ }
+
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various core SGE parameters in the form of hardware SGE
+ * register values. The caller is responsible for decoding these as
+ * needed. The SGE parameters are stored in @adapter->params.sge.
+ */
+int t4vf_get_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
+ params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
+ params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
+ params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
+ params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
+ v = t4vf_query_params(adapter, 7, params, vals);
+ if (v)
+ return v;
+ sge_params->sge_control = vals[0];
+ sge_params->sge_host_page_size = vals[1];
+ sge_params->sge_fl_buffer_size[0] = vals[2];
+ sge_params->sge_fl_buffer_size[1] = vals[3];
+ sge_params->sge_timer_value_0_and_1 = vals[4];
+ sge_params->sge_timer_value_2_and_3 = vals[5];
+ sge_params->sge_timer_value_4_and_5 = vals[6];
+
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v)
+ return v;
+ sge_params->sge_ingress_rx_threshold = vals[0];
+
+ return 0;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters. The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+ struct vpd_params *vpd_params = &adapter->params.vpd;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v)
+ return v;
+ vpd_params->cclk = vals[0];
+
+ return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device parameters. The parameters are stored in
+ * @adapter->params.dev.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+ struct dev_params *dev_params = &adapter->params.dev;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v)
+ return v;
+ dev_params->fwrev = vals[0];
+ dev_params->tprev = vals[1];
+
+ return 0;
+}
+
+/**
+ * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ * @adapter: the adapter
+ *
+ * Retrieves global RSS mode and parameters with which we have to live
+ * and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+ struct rss_params *rss = &adapter->params.rss;
+ struct fw_rss_glb_config_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute an RSS Global Configuration read command to retrieve
+ * our RSS configuration.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v)
+ return v;
+
+ /*
+ * Translate the big-endian RSS Global Configuration into our
+ * CPU-endian format based on the RSS mode. We also do first-level
+ * filtering at this point to weed out modes which don't support
+ * VF Drivers ...
+ */
+ rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
+ be32_to_cpu(rpl.u.manual.mode_pkd));
+ switch (rss->mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu(
+ rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+ rss->u.basicvirtual.synmapen =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ rss->u.basicvirtual.syn4tupenipv6 =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn2tupenipv6 =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn4tupenipv4 =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ rss->u.basicvirtual.syn2tupenipv4 =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+
+ rss->u.basicvirtual.ofdmapen =
+ ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+
+ rss->u.basicvirtual.tnlmapen =
+ ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ rss->u.basicvirtual.tnlalllookup =
+ ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+
+ rss->u.basicvirtual.hashtoeplitz =
+ ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+ /* we need at least Tunnel Map Enable to be set */
+ if (!rss->u.basicvirtual.tnlmapen)
+ return -EINVAL;
+ break;
+ }
+
+ default:
+ /* all unknown/unsupported RSS modes result in an error */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * t4vf_get_vfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a virtual
+ * function. The results are stored in @adapter->params.vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ int v;
+ u32 word;
+
+ /*
+ * Execute PFVF Read command to get VF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v)
+ return v;
+
+ /*
+ * Extract VF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
+ vfres->niq = FW_PFVF_CMD_NIQ_GET(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ vfres->neq = FW_PFVF_CMD_NEQ_GET(word);
+ vfres->pmask = FW_PFVF_CMD_PMASK_GET(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ vfres->tc = FW_PFVF_CMD_TC_GET(word);
+ vfres->nvi = FW_PFVF_CMD_NVI_GET(word);
+ vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word);
+ vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word);
+ vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word);
+
+ return 0;
+}
+
+/**
+ * t4vf_read_rss_vi_config - read a VI's RSS configuration
+ * @adapter: the adapter
+ * @viid: Virtual Interface ID
+ * @config: pointer to host-native VI RSS Configuration buffer
+ *
+ * Reads the Virtual Interface's RSS configuration information and
+ * translates it into CPU-native format.
+ */
+int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
+ union rss_vi_config *config)
+{
+ struct fw_rss_vi_config_cmd cmd, rpl;
+ int v;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v)
+ return v;
+
+ switch (adapter->params.rss.mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
+
+ config->basicvirtual.ip6fourtupen =
+ ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0);
+ config->basicvirtual.ip6twotupen =
+ ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0);
+ config->basicvirtual.ip4fourtupen =
+ ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0);
+ config->basicvirtual.ip4twotupen =
+ ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0);
+ config->basicvirtual.udpen =
+ ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0);
+ config->basicvirtual.defaultq =
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word);
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * t4vf_write_rss_vi_config - write a VI's RSS configuration
+ * @adapter: the adapter
+ * @viid: Virtual Interface ID
+ * @config: pointer to host-native VI RSS Configuration buffer
+ *
+ * Write the Virtual Interface's RSS configuration information
+ * (translating it into firmware-native format before writing).
+ */
+int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
+ union rss_vi_config *config)
+{
+ struct fw_rss_vi_config_cmd cmd, rpl;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ switch (adapter->params.rss.mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = 0;
+
+ if (config->basicvirtual.ip6fourtupen)
+ word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+ if (config->basicvirtual.ip6twotupen)
+ word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+ if (config->basicvirtual.ip4fourtupen)
+ word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+ if (config->basicvirtual.ip4twotupen)
+ word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+ if (config->basicvirtual.udpen)
+ word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
+ config->basicvirtual.defaultq);
+ cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+}
+
+/**
+ * t4vf_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @viid: Virtual Interface of RSS Table Slice
+ * @start: starting entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range 0..1023.
+ */
+int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
+ int start, int n, const u16 *rspq, int nrspq)
+{
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq+nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ /*
+ * Initialize firmware command template to write the RSS table.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+ /*
+ * Each firmware RSS command can accommodate up to 32 RSS Ingress
+ * Queue Identifiers. These Ingress Queue IDs are packed three to
+ * a 32-bit word as 10-bit values with the upper remaining 2 bits
+ * reserved.
+ */
+ while (n > 0) {
+ __be32 *qp = &cmd.iq0_to_iq2;
+ int nq = min(n, 32);
+ int ret;
+
+ /*
+ * Set up the firmware RSS command header to send the next
+ * "nq" Ingress Queue IDs to the firmware.
+ */
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
+
+ /*
+ * "nq" more done for the start of the next loop.
+ */
+ start += nq;
+ n -= nq;
+
+ /*
+ * While there are still Ingress Queue IDs to stuff into the
+ * current firmware RSS command, retrieve them from the
+ * Ingress Queue ID array and insert them into the command.
+ */
+ while (nq > 0) {
+ /*
+ * Grab up to the next 3 Ingress Queue IDs (wrapping
+ * around the Ingress Queue ID array if necessary) and
+ * insert them into the firmware RSS command at the
+ * current 3-tuple position within the command.
+ */
+ u16 qbuf[3];
+ u16 *qbp = qbuf;
+ int nqbuf = min(3, nq);
+
+ nq -= nqbuf;
+ qbuf[0] = qbuf[1] = qbuf[2] = 0;
+ while (nqbuf) {
+ nqbuf--;
+ *qbp++ = *rsp++;
+ if (rsp >= rsp_end)
+ rsp = rspq;
+ }
+ *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
+ FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
+ FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+ }
+
+ /*
+ * Send this portion of the RSS table update to the firmware;
+ * bail out on any errors.
+ */
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
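
As a small worked example of the replication and 3-per-word packing described
above (illustrative only):

/*
 * t4vf_config_rss_range(adapter, viid, 0, 4, rspq, 2) with
 * rspq[] = { 5, 9 } replicates the two Ingress Queue IDs to fill the
 * four requested slots (5, 9, 5, 9) and packs them into the command as:
 *
 *   IQ word 0: IQ0 = 5, IQ1 = 9, IQ2 = 5
 *   IQ word 1: IQ0 = 9  (remaining 10-bit fields unused)
 *
 * with cmd.niqid = 4 and cmd.startidx = 0, all in a single firmware
 * command since 4 <= 32.
 */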
+
+/**
+ * t4vf_alloc_vi - allocate a virtual interface on a port
+ * @adapter: the adapter
+ * @port_id: physical port associated with the VI
+ *
+ * Allocate a new Virtual Interface and bind it to the indicated
+ * physical port. Return the new Virtual Interface Identifier on
+ * success, or a [negative] error number on failure.
+ */
+int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+ struct fw_vi_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute a VI command to allocate Virtual Interface and return its
+ * VIID.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ FW_VI_CMD_ALLOC);
+ cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v)
+ return v;
+
+ return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
+}
+
+/**
+ * t4vf_free_vi -- free a virtual interface
+ * @adapter: the adapter
+ * @viid: the virtual interface identifier
+ *
+ * Free a previously allocated Virtual Interface. Return an error on
+ * failure.
+ */
+int t4vf_free_vi(struct adapter *adapter, int viid)
+{
+ struct fw_vi_cmd cmd;
+
+ /*
+ * Execute a VI command to free the Virtual Interface.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ FW_VI_CMD_FREE);
+ cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_enable_vi - enable/disable a virtual interface
+ * @adapter: the adapter
+ * @viid: the Virtual Interface ID
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface.
+ */
+int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
+ bool rx_en, bool tx_en)
+{
+ struct fw_vi_enable_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_VI_ENABLE_CMD_VIID(viid));
+ cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
+ FW_VI_ENABLE_CMD_EEN(tx_en) |
+ FW_LEN16(cmd));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_identify_port - identify a VI's port by blinking its LED
+ * @adapter: the adapter
+ * @viid: the Virtual Interface ID
+ * @nblinks: how many times to blink LED at 2.5 Hz
+ *
+ * Identifies a VI's port by blinking its LED.
+ */
+int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
+ unsigned int nblinks)
+{
+ struct fw_vi_enable_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_VI_ENABLE_CMD_VIID(viid));
+ cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED |
+ FW_LEN16(cmd));
+ cmd.blinkdur = cpu_to_be16(nblinks);
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_set_rxmode - set Rx properties of a virtual interface
+ * @adapter: the adapter
+ * @viid: the VI id
+ * @mtu: the new MTU or -1 for no change
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ * -1 no change
+ *
+ * Sets Rx properties of a virtual interface.
+ */
+int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
+{
+ struct fw_vi_rxmode_cmd cmd;
+
+ /* convert to FW values */
+ if (mtu < 0)
+ mtu = FW_VI_RXMODE_CMD_MTU_MASK;
+ if (promisc < 0)
+ promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+ if (all_multi < 0)
+ all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+ if (bcast < 0)
+ bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ if (vlanex < 0)
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_VI_RXMODE_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ cmd.mtu_to_vlanexen =
+ cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
+}
+
+/**
+ * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ * @adapter: the adapter
+ * @viid: the Virtual Interface Identifier
+ * @free: if true any existing filters for this VI id are first removed
+ * @naddr: the number of MAC addresses to allocate filters for (up to 7)
+ * @addr: the MAC address(es)
+ * @idx: where to store the index of each allocated filter
+ * @hash: pointer to hash address filter bitmap
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Allocates an exact-match filter for each of the supplied addresses and
+ * sets it to the corresponding address. If @idx is not %NULL it should
+ * have at least @naddr entries, each of which will be set to the index of
+ * the filter allocated for the corresponding MAC address. If a filter
+ * could not be allocated for an address its index is set to 0xffff.
+ * If @hash is not %NULL, addresses that fail to allocate an exact filter
+ * are hashed and the hash filter bitmap pointed to by @hash is updated.
+ *
+ * Returns a negative error number or the number of filters allocated.
+ */
+int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
+ unsigned int naddr, const u8 **addr, u16 *idx,
+ u64 *hash, bool sleep_ok)
+{
+ int i, ret;
+ struct fw_vi_mac_cmd cmd, rpl;
+ struct fw_vi_mac_exact *p;
+ size_t len16;
+
+ if (naddr > ARRAY_SIZE(cmd.u.exact))
+ return -EINVAL;
+ len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[naddr]), 16);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ (free ? FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16(len16));
+
+ for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
+ p->valid_to_idx =
+ cpu_to_be16(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
+ }
+
+ ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
+ if (ret)
+ return ret;
+
+ for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
+ ? 0xffff
+ : index);
+ if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ ret++;
+ else if (hash)
+ *hash |= (1 << hash_mac_addr(addr[i]));
+ }
+ return ret;
+}
+
+/**
+ * t4vf_change_mac - modifies the exact-match filter for a MAC address
+ * @adapter: the adapter
+ * @viid: the Virtual Interface ID
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: if idx < 0, the new MAC allocation should be persistent
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address.
+ * Note that in general it is not possible to modify the value of a given
+ * filter so the generic way to modify an address filter is to free the
+ * one being used by the old address value and allocate a new filter for
+ * the new address value. @idx can be -1 if the address is a new
+ * addition.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value.
+ */
+int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
+ int idx, const u8 *addr, bool persist)
+{
+ int ret;
+ struct fw_vi_mac_cmd cmd, rpl;
+ struct fw_vi_mac_exact *p = &cmd.u.exact[0];
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[1]), 16);
+
+ /*
+ * If this is a new allocation, determine whether it should be
+ * persistent (across a "freemacs" operation) or not.
+ */
+ if (idx < 0)
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_VI_MAC_CMD_VIID(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0) {
+ p = &rpl.u.exact[0];
+ ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
+ if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * t4vf_set_addr_hash - program the MAC inexact-match hash filter
+ * @adapter: the adapter
+ * @viid: the Virtual Interface Identifier
+ * @ucast: whether the hash filter should also match unicast addresses
+ * @vec: the value to be written to the hash filter
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
+ bool ucast, u64 vec, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd cmd;
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[0]), 16);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_VI_ENABLE_CMD_VIID(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN |
+ FW_VI_MAC_CMD_HASHUNIEN(ucast) |
+ FW_CMD_LEN16(len16));
+ cmd.u.hash.hashvec = cpu_to_be64(vec);
+ return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
+}
+
+/**
+ * t4vf_get_port_stats - collect "port" statistics
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @s: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface.
+ */
+int t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct t4vf_port_stats *s)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ struct fw_vi_stats_vf fwstats;
+ unsigned int rem = VI_VF_NUM_STATS;
+ __be64 *fwsp = (__be64 *)&fwstats;
+
+ /*
+ * Grab the Virtual Interface statistics a chunk at a time via mailbox
+ * commands. We could use a Work Request and get all of them at once
+ * but that's an asynchronous interface which is awkward to use.
+ */
+ while (rem) {
+ unsigned int ix = VI_VF_NUM_STATS - rem;
+ unsigned int nstats = min(6U, rem);
+ struct fw_vi_stats_cmd cmd, rpl;
+ size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+ sizeof(struct fw_vi_stats_ctl));
+ size_t len16 = DIV_ROUND_UP(len, 16);
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) |
+ FW_VI_STATS_CMD_VIID(pi->viid) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ cmd.u.ctl.nstats_ix =
+ cpu_to_be16(FW_VI_STATS_CMD_IX(ix) |
+ FW_VI_STATS_CMD_NSTATS(nstats));
+ ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+ if (ret)
+ return ret;
+
+ memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+ rem -= nstats;
+ fwsp += nstats;
+ }
+
+ /*
+ * Translate firmware statistics into host native statistics.
+ */
+ s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
+ s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+ s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
+ s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+ s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
+ s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+ s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
+ s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
+ s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);
+
+ s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
+ s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+ s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
+ s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+ s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
+ s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+
+ s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);
+
+ return 0;
+}
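
Since each mailbox command carries at most six 64-bit counters, the loop above
fetches the Virtual Interface statistics in three passes, assuming
VI_VF_NUM_STATS is 16 (matching the sixteen u64 counters in struct
t4vf_port_stats):

/*
 * Illustrative chunking (assuming VI_VF_NUM_STATS == 16):
 *
 *   pass 1: ix = 0,  nstats = 6
 *   pass 2: ix = 6,  nstats = 6
 *   pass 3: ix = 12, nstats = 4
 */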
+
+/**
+ * t4vf_iq_free - free an ingress queue and its free lists
+ * @adapter: the adapter
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue ID
+ * @fl0id: FL0 queue ID or 0xffff if no attached FL0
+ * @fl1id: FL1 queue ID or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated free lists, if any.
+ */
+int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
+ unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE |
+ FW_LEN16(cmd));
+ cmd.type_to_iqandstindex =
+ cpu_to_be32(FW_IQ_CMD_TYPE(iqtype));
+
+ cmd.iqid = cpu_to_be16(iqid);
+ cmd.fl0id = cpu_to_be16(fl0id);
+ cmd.fl1id = cpu_to_be16(fl1id);
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_eth_eq_free - free an Ethernet egress queue
+ * @adapter: the adapter
+ * @eqid: egress queue ID
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE |
+ FW_LEN16(cmd));
+ cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_handle_fw_rpl - process a firmware reply message
+ * @adapter: the adapter
+ * @rpl: start of the firmware message
+ *
+ * Processes a firmware message, such as link state change messages.
+ */
+int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
+{
+ struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+ u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
+
+ switch (opcode) {
+ case FW_PORT_CMD: {
+ /*
+ * Link/module state change message.
+ */
+ const struct fw_port_cmd *port_cmd = (void *)rpl;
+ u32 word;
+ int action, port_id, link_ok, speed, fc, pidx;
+
+ /*
+ * Extract various fields from port status change message.
+ */
+ action = FW_PORT_CMD_ACTION_GET(
+ be32_to_cpu(port_cmd->action_to_len16));
+ if (action != FW_PORT_ACTION_GET_PORT_INFO) {
+ dev_err(adapter->pdev_dev,
+ "Unknown firmware PORT reply action %x\n",
+ action);
+ break;
+ }
+
+ port_id = FW_PORT_CMD_PORTID_GET(
+ be32_to_cpu(port_cmd->op_to_portid));
+
+ word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+ link_ok = (word & FW_PORT_CMD_LSTATUS) != 0;
+ speed = 0;
+ fc = 0;
+ if (word & FW_PORT_CMD_RXPAUSE)
+ fc |= PAUSE_RX;
+ if (word & FW_PORT_CMD_TXPAUSE)
+ fc |= PAUSE_TX;
+ if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ speed = SPEED_100;
+ else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ speed = SPEED_1000;
+ else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ speed = SPEED_10000;
+
+ /*
+ * Scan all of our "ports" (Virtual Interfaces) looking for
+ * those bound to the physical port which has changed. If
+ * our recorded state doesn't match the current state,
+ * signal that change to the OS code.
+ */
+ for_each_port(adapter, pidx) {
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ struct link_config *lc;
+
+ if (pi->port_id != port_id)
+ continue;
+
+ lc = &pi->link_cfg;
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) {
+ /* something changed */
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ t4vf_os_link_changed(adapter, pidx, link_ok);
+ }
+ }
+ break;
+ }
+
+ default:
+ dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
+ opcode);
+ }
+ return 0;
+}
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index f654db9..a4a0d2b 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 4dc02c7..307a72f4 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -359,6 +359,7 @@
#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
@@ -714,6 +715,7 @@
#define BME1000_E_PHY_ID_R2 0x01410CB1
#define I82577_E_PHY_ID 0x01540050
#define I82578_E_PHY_ID 0x004DD040
+#define I82579_E_PHY_ID 0x01540090
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c0b3db4..9ee133f 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -164,6 +164,7 @@ enum e1000_boards {
board_ich9lan,
board_ich10lan,
board_pchlan,
+ board_pch2lan,
};
struct e1000_queue_stats {
@@ -421,6 +422,8 @@ struct e1000_info {
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
#define FLAG2_IS_DISCARDING (1 << 2)
#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
+#define FLAG2_HAS_PHY_STATS (1 << 4)
+#define FLAG2_HAS_EEE (1 << 5)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -458,7 +461,6 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
-extern bool e1000e_has_link(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
@@ -476,6 +478,7 @@ extern struct e1000_info e1000_ich8_info;
extern struct e1000_info e1000_ich9_info;
extern struct e1000_info e1000_ich10_info;
extern struct e1000_info e1000_pch_info;
+extern struct e1000_info e1000_pch2_info;
extern struct e1000_info e1000_es2_info;
extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -494,6 +497,8 @@ extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 38d79a6..45aebb4 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 2c52121..6355a1b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -118,7 +118,6 @@ static int e1000_get_settings(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status;
if (hw->phy.media_type == e1000_media_type_copper) {
@@ -156,22 +155,29 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_EXTERNAL;
}
- status = er32(STATUS);
- if (status & E1000_STATUS_LU) {
- if (status & E1000_STATUS_SPEED_1000)
- ecmd->speed = 1000;
- else if (status & E1000_STATUS_SPEED_100)
- ecmd->speed = 100;
- else
- ecmd->speed = 10;
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
- if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
- else
- ecmd->duplex = DUPLEX_HALF;
+ if (netif_running(netdev)) {
+ if (netif_carrier_ok(netdev)) {
+ ecmd->speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex - 1;
+ }
} else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
+ u32 status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ if (status & E1000_STATUS_SPEED_1000)
+ ecmd->speed = 1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ ecmd->speed = 100;
+ else
+ ecmd->speed = 10;
+
+ if (status & E1000_STATUS_FD)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ }
}
ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
@@ -179,7 +185,7 @@ static int e1000_get_settings(struct net_device *netdev,
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
- !hw->mac.get_link_status)
+ netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
ETH_TP_MDI;
else
@@ -191,19 +197,15 @@ static int e1000_get_settings(struct net_device *netdev,
static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct e1000_hw *hw = &adapter->hw;
/*
- * If the link is not reported up to netdev, interrupts are disabled,
- * and so the physical link state may have changed since we last
- * looked. Set get_link_status to make sure that the true link
- * state is interrogated, rather than pulling a cached and possibly
- * stale link state from the driver.
+ * Avoid touching hardware registers when possible, otherwise
+ * link negotiation can get messed up when user-level scripts
+ * are rapidly polling the driver to see if link is up.
*/
- if (!netif_carrier_ok(netdev))
- mac->get_link_status = 1;
-
- return e1000e_has_link(adapter);
+ return netif_running(netdev) ? netif_carrier_ok(netdev) :
+ !!(er32(STATUS) & E1000_STATUS_LU);
}
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
@@ -880,6 +882,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
switch (mac->type) {
case e1000_ich10lan:
case e1000_pchlan:
+ case e1000_pch2lan:
mask |= (1 << 18);
break;
default:
@@ -1263,33 +1266,36 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
hw->mac.autoneg = 0;
- /* Workaround: K1 must be disabled for stable 1Gbps operation */
- if (hw->mac.type == e1000_pchlan)
- e1000_configure_k1_ich8lan(hw, false);
-
- if (hw->phy.type == e1000_phy_m88) {
- /* Auto-MDI/MDIX Off */
- e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
- /* reset to update Auto-MDI/MDIX */
- e1e_wphy(hw, PHY_CONTROL, 0x9140);
- /* autoneg off */
- e1e_wphy(hw, PHY_CONTROL, 0x8140);
- } else if (hw->phy.type == e1000_phy_gg82563)
- e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
-
- ctrl_reg = er32(CTRL);
-
- switch (hw->phy.type) {
- case e1000_phy_ife:
+ if (hw->phy.type == e1000_phy_ife) {
/* force 100, set loopback */
e1e_wphy(hw, PHY_CONTROL, 0x6100);
/* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_100 |/* Force Speed to 100 */
E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ ew32(CTRL, ctrl_reg);
+ udelay(500);
+
+ return 0;
+ }
+
+ /* Specific PHY configuration for loopback */
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ /* Auto-MDI/MDIX Off */
+ e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1e_wphy(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+ e1e_wphy(hw, PHY_CONTROL, 0x8140);
+ break;
+ case e1000_phy_gg82563:
+ e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
break;
case e1000_phy_bm:
/* Set Default MAC Interface speed to 1GB */
@@ -1312,23 +1318,41 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* Set Early Link Enable */
e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
- /* fall through */
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82578:
+ /* Workaround: K1 must be disabled for stable 1Gbps operation */
+ e1000_configure_k1_ich8lan(hw, false);
+ break;
+ case e1000_phy_82579:
+ /* Disable PHY energy detect power down */
+ e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ /* Disable full chip energy detect */
+ e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
+ /* Enable loopback on the PHY */
+#define I82577_PHY_LBK_CTRL 19
+ e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
+ break;
default:
- /* force 1000, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x4140);
- mdelay(250);
+ break;
+ }
- /* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = er32(CTRL);
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+ /* force 1000, set loopback */
+ e1e_wphy(hw, PHY_CONTROL, 0x4140);
+ mdelay(250);
- if (adapter->flags & FLAG_IS_ICH)
- ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
- }
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if (adapter->flags & FLAG_IS_ICH)
+ ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
if (hw->phy.media_type == e1000_media_type_copper &&
hw->phy.type == e1000_phy_m88) {
@@ -1868,6 +1892,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
if ((hw->phy.type == e1000_phy_ife) ||
(hw->mac.type == e1000_pchlan) ||
+ (hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_82583) ||
(hw->mac.type == e1000_82574)) {
INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
@@ -2026,7 +2051,6 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
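
The get_link/get_settings rework above reports link from state the driver already tracks when the interface is running, and only falls back to the STATUS register otherwise, so rapid ethtool polling no longer touches the hardware. A small sketch of that decision outside the driver; the STATUS_LU bit value is copied from the e1000 definitions and should be treated as an assumption here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATUS_LU	0x00000002u	/* Link Up bit in the STATUS register */

/* Mirror of the patched e1000_get_link() decision: trust the carrier
 * state while the interface is up, read hardware only when it is down. */
static bool report_link(bool running, bool carrier_ok, uint32_t status_reg)
{
	if (running)
		return carrier_ok;
	return (status_reg & STATUS_LU) != 0;
}

int main(void)
{
	printf("%d\n", report_link(true, true, 0));		/* 1: from carrier  */
	printf("%d\n", report_link(false, false, STATUS_LU));	/* 1: from register */
	return 0;
}
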
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 5d1220d..0cd569a 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -217,7 +217,10 @@ enum e1e_registers {
E1000_SWSM = 0x05B50, /* SW Semaphore */
E1000_FWSM = 0x05B54, /* FW Semaphore */
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
- E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */
+ E1000_FFLT_DBG = 0x05F04, /* Debug Register */
+ E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
+#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
+#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
E1000_HICR = 0x08F00, /* Host Interface Control */
};
@@ -303,13 +306,14 @@ enum e1e_registers {
#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
-#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x0002
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
@@ -387,6 +391,8 @@ enum e1e_registers {
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM 0x1502
+#define E1000_DEV_ID_PCH2_LV_V 0x1503
#define E1000_REVISION_4 4
@@ -406,6 +412,7 @@ enum e1000_mac_type {
e1000_ich9lan,
e1000_ich10lan,
e1000_pchlan,
+ e1000_pch2lan,
};
enum e1000_media_type {
@@ -442,6 +449,7 @@ enum e1000_phy_type {
e1000_phy_bm,
e1000_phy_82578,
e1000_phy_82577,
+ e1000_phy_82579,
};
enum e1000_bus_width {
@@ -929,6 +937,7 @@ struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
bool nvm_k1_enabled;
+ bool eee_disable;
};
struct e1000_hw {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index b2507d9..63930d1 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,8 @@
* 82577LC Gigabit Network Connection
* 82578DM Gigabit Network Connection
* 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
*/
#include "e1000.h"
@@ -126,6 +128,13 @@
#define HV_SMB_ADDR_PEC_EN 0x0200
#define HV_SMB_ADDR_VALID 0x0080
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+
/* Strapping Option Register - RO */
#define E1000_STRAP 0x0000C
#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
@@ -226,6 +235,8 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -277,13 +288,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. If the manageability engine (ME) is
+ * disabled, then toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode.
+ */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
- /*
- * The MAC-PHY interconnect may still be in SMBus mode
- * after Sx->S0. Toggle the LANPHYPC Value bit to force
- * the interconnect to PCIe mode, but only if there is no
- * firmware present otherwise firmware will have done it.
- */
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -324,6 +335,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
switch (phy->type) {
case e1000_phy_82577:
+ case e1000_phy_82579:
phy->ops.check_polarity = e1000_check_polarity_82577;
phy->ops.force_speed_duplex =
e1000_phy_force_speed_duplex_82577;
@@ -515,6 +527,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
/* ID LED init */
mac->ops.id_led_init = e1000e_id_led_init;
/* setup LED */
@@ -526,6 +540,9 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->ops.led_off = e1000_led_off_ich8lan;
break;
case e1000_pchlan:
+ case e1000_pch2lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_pchlan;
/* setup LED */
@@ -544,10 +561,47 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+ /* Disable PHY configuration by hardware, config by software */
+ if (mac->type == e1000_pch2lan) {
+ u32 extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ }
+
return 0;
}
/**
+ * e1000_set_eee_pchlan - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure. The bits in
+ * the LPI Control register will remain set only if/when link is up.
+ **/
+static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 phy_reg;
+
+ if (hw->phy.type != e1000_phy_82579)
+ goto out;
+
+ ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ if (ret_val)
+ goto out;
+
+ if (hw->dev_spec.ich8lan.eee_disable)
+ phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
+ else
+ phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
+
+ ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+out:
+ return ret_val;
+}
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
@@ -604,6 +658,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
*/
e1000e_check_downshift(hw);
+ /* Enable/Disable EEE after link up */
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ goto out;
+
/*
* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
@@ -647,10 +706,19 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
if (rc)
return rc;
- if (hw->mac.type == e1000_pchlan)
- rc = e1000_init_phy_params_pchlan(hw);
- else
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
rc = e1000_init_phy_params_ich8lan(hw);
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ rc = e1000_init_phy_params_pchlan(hw);
+ break;
+ default:
+ break;
+ }
if (rc)
return rc;
@@ -663,6 +731,10 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
(adapter->hw.phy.type == e1000_phy_igp_3))
adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
+ /* Disable EEE by default until IEEE802.3az spec is finalized */
+ if (adapter->flags2 & FLAG2_HAS_EEE)
+ adapter->hw.dev_spec.ich8lan.eee_disable = true;
+
return 0;
}
@@ -774,7 +846,7 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
* e1000_check_mng_mode_ich8lan - Checks management mode
* @hw: pointer to the HW structure
*
- * This checks if the adapter has manageability enabled.
+ * This checks if the adapter has any manageability enabled.
* This is a function pointer entry point only called by read/write
* routines for the PHY and NVM parts.
**/
@@ -783,9 +855,26 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_check_mng_mode_pchlan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has iAMT enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
- return (fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+ fwsm = er32(FWSM);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -820,14 +909,6 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
- if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
- !(hw->mac.type == e1000_pchlan))
- return ret_val;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
/*
* Initialize the PHY from the NVM on ICH platforms. This
* is needed due to an issue where the NVM configuration is
@@ -835,12 +916,27 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
- if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
- (hw->mac.type == e1000_pchlan))
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ if (phy->type != e1000_phy_igp_3)
+ return ret_val;
+
+ if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ }
+ /* Fall-thru */
+ case e1000_pchlan:
+ case e1000_pch2lan:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ default:
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
data = er32(FEXTNVM);
if (!(data & sw_cfg_mask))
@@ -851,8 +947,10 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* extended configuration before SW configuration
*/
data = er32(EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
- goto out;
+ if (!(hw->mac.type == e1000_pch2lan)) {
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+ }
cnf_size = er32(EXTCNF_SIZE);
cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -864,7 +962,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- (hw->mac.type == e1000_pchlan)) {
+ ((hw->mac.type == e1000_pchlan) ||
+ (hw->mac.type == e1000_pch2lan))) {
/*
* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
@@ -1071,16 +1170,18 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
u32 mac_reg;
u16 oem_reg;
- if (hw->mac.type != e1000_pchlan)
+ if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
return ret_val;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- mac_reg = er32(EXTCNF_CTRL);
- if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
- goto out;
+ if (!(hw->mac.type == e1000_pch2lan)) {
+ mac_reg = er32(EXTCNF_CTRL);
+ if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+ goto out;
+ }
mac_reg = er32(FEXTNVM);
if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
@@ -1221,6 +1322,243 @@ out:
}
/**
+ * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ * @hw: pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+ u16 i;
+
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ mac_reg = er32(RAL(i));
+ e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
+ e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
+ mac_reg = er32(RAH(i));
+ e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
+ e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
+ }
+}
+
+static u32 e1000_calc_rx_da_crc(u8 mac[])
+{
+ u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
+ u32 i, j, mask, crc;
+
+ crc = 0xffffffff;
+ for (i = 0; i < 6; i++) {
+ crc = crc ^ mac[i];
+ for (j = 8; j > 0; j--) {
+ mask = (crc & 1) * (-1);
+ crc = (crc >> 1) ^ (poly & mask);
+ }
+ }
+ return ~crc;
+}
+
+/**
+ * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ * with 82579 PHY
+ * @hw: pointer to the HW structure
+ * @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+ s32 ret_val = 0;
+ u16 phy_reg, data;
+ u32 mac_reg;
+ u16 i;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* disable Rx path while enabling/disabling workaround */
+ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ if (ret_val)
+ goto out;
+
+ if (enable) {
+ /*
+ * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ * SHRAL/H) and initial CRC values to the MAC
+ */
+ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ u8 mac_addr[ETH_ALEN] = {0};
+ u32 addr_high, addr_low;
+
+ addr_high = er32(RAH(i));
+ if (!(addr_high & E1000_RAH_AV))
+ continue;
+ addr_low = er32(RAL(i));
+ mac_addr[0] = (addr_low & 0xFF);
+ mac_addr[1] = ((addr_low >> 8) & 0xFF);
+ mac_addr[2] = ((addr_low >> 16) & 0xFF);
+ mac_addr[3] = ((addr_low >> 24) & 0xFF);
+ mac_addr[4] = (addr_high & 0xFF);
+ mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
+ ew32(PCH_RAICC(i),
+ e1000_calc_rx_da_crc(mac_addr));
+ }
+
+ /* Write Rx addresses to the PHY */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ /* Enable jumbo frame workaround in the MAC */
+ mac_reg = er32(FFLT_DBG);
+ mac_reg &= ~(1 << 14);
+ mac_reg |= (7 << 15);
+ ew32(FFLT_DBG, mac_reg);
+
+ mac_reg = er32(RCTL);
+ mac_reg |= E1000_RCTL_SECRC;
+ ew32(RCTL, mac_reg);
+
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data | (1 << 0));
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Enable jumbo frame workaround in the PHY */
+ e1e_rphy(hw, PHY_REG(769, 20), &data);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ data |= (0x37 << 5);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 13);
+ data |= (1 << 12);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x1A << 2);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ goto out;
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, HV_PM_CTRL, &data);
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+ if (ret_val)
+ goto out;
+ } else {
+ /* Write MAC register values back to h/w defaults */
+ mac_reg = er32(FFLT_DBG);
+ mac_reg &= ~(0xF << 14);
+ ew32(FFLT_DBG, mac_reg);
+
+ mac_reg = er32(RCTL);
+ mac_reg &= ~E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data & ~(1 << 0));
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Write PHY register values back to h/w defaults */
+ e1e_rphy(hw, PHY_REG(769, 20), &data);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 12);
+ data |= (1 << 13);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x8 << 2);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ goto out;
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, HV_PM_CTRL, &data);
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+ if (ret_val)
+ goto out;
+ }
+
+ /* re-enable Rx path after enabling/disabling workaround */
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
+ * done after every PHY reset.
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_lan_init_done_ich8lan - Check for PHY config completion
* @hw: pointer to the HW structure
*
@@ -1271,12 +1609,17 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto out;
break;
+ case e1000_pch2lan:
+ ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ break;
default:
break;
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type == e1000_pchlan)
+ if (hw->mac.type >= e1000_pchlan)
e1e_rphy(hw, BM_WUC, &reg);
/* Configure the LCD with the extended configuration region in NVM */
@@ -2800,6 +3143,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
ew32(FCTTV, hw->fc.pause_time);
if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
ew32(FCRTV_PCH, hw->fc.refresh_time);
@@ -2863,6 +3207,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
break;
case e1000_phy_82577:
+ case e1000_phy_82579:
ret_val = e1000_copper_link_setup_82577(hw);
if (ret_val)
return ret_val;
@@ -3116,21 +3461,12 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
- switch (hw->mac.type) {
- case e1000_ich8lan:
- case e1000_ich9lan:
- case e1000_ich10lan:
- case e1000_pchlan:
- phy_ctrl = er32(PHY_CTRL);
- phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
- E1000_PHY_CTRL_GBE_DISABLE;
- ew32(PHY_CTRL, phy_ctrl);
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+ ew32(PHY_CTRL, phy_ctrl);
- if (hw->mac.type == e1000_pchlan)
- e1000_phy_hw_reset_ich8lan(hw);
- default:
- break;
- }
+ if (hw->mac.type >= e1000_pchlan)
+ e1000_phy_hw_reset_ich8lan(hw);
}
/**
@@ -3370,6 +3706,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
/* Clear PHY statistics registers */
if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
@@ -3390,7 +3727,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
static struct e1000_mac_operations ich8_mac_ops = {
.id_led_init = e1000e_id_led_init,
- .check_mng_mode = e1000_check_mng_mode_ich8lan,
+ /* check_mng_mode dependent on mac type */
.check_for_link = e1000_check_for_copper_link_ich8lan,
/* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
@@ -3497,6 +3834,7 @@ struct e1000_info e1000_pch_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
| FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS,
.pba = 26,
.max_hw_frame_size = 4096,
.get_variants = e1000_get_variants_ich8lan,
@@ -3504,3 +3842,23 @@ struct e1000_info e1000_pch_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &ich8_nvm_ops,
};
+
+struct e1000_info e1000_pch2_info = {
+ .mac = e1000_pch2lan,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_RX_CSUM_ENABLED
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 18,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
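
For reference, the receive-address CRC written to the PCH_RAICC registers by the jumbo-frame workaround is an ordinary bit-reflected CRC-32 (polynomial 0xEDB88320) over the six address bytes. A standalone sketch of the same calculation as e1000_calc_rx_da_crc(), buildable outside the kernel; the sample MAC address is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 over a MAC address, matching e1000_calc_rx_da_crc(). */
static uint32_t rx_da_crc(const uint8_t mac[6])
{
	const uint32_t poly = 0xEDB88320u;	/* 802.3 CRC polynomial, reflected */
	uint32_t crc = 0xFFFFFFFFu;

	for (int i = 0; i < 6; i++) {
		crc ^= mac[i];
		for (int j = 0; j < 8; j++) {
			uint32_t mask = -(crc & 1u);	/* all-ones if LSB set */
			crc = (crc >> 1) ^ (poly & mask);
		}
	}
	return ~crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0x3C, 0x9D, 0xF8 };

	printf("%08X\n", rx_da_crc(mac));
	return 0;
}
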
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a968e3a..df4a279 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57a7e41..6aa795a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -52,7 +52,9 @@
#include "e1000.h"
-#define DRV_VERSION "1.0.2-k4"
+#define DRV_EXTRAVERSION "-k2"
+
+#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -67,6 +69,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_ich9lan] = &e1000_ich9_info,
[board_ich10lan] = &e1000_ich10_info,
[board_pchlan] = &e1000_pch_info,
+ [board_pch2lan] = &e1000_pch2_info,
};
struct e1000_reg_info {
@@ -221,10 +224,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)buffer_info->dma,
+ (unsigned long long)buffer_info->dma,
buffer_info->length,
buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp);
+ (unsigned long long)buffer_info->time_stamp);
/* Print TX Rings */
if (!netif_msg_tx_done(adapter))
@@ -276,9 +279,11 @@ static void e1000e_dump(struct e1000_adapter *adapter)
"%04X %3X %016llX %p",
(!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
- le64_to_cpu(u0->a), le64_to_cpu(u0->b),
- (u64)buffer_info->dma, buffer_info->length,
- buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length, buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp,
buffer_info->skb);
if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
printk(KERN_CONT " NTC/U\n");
@@ -353,19 +358,19 @@ rx_ring_summary:
printk(KERN_INFO "RWB[0x%03X] %016llX "
"%016llX %016llX %016llX "
"---------------- %p", i,
- le64_to_cpu(u1->a),
- le64_to_cpu(u1->b),
- le64_to_cpu(u1->c),
- le64_to_cpu(u1->d),
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
buffer_info->skb);
} else {
printk(KERN_INFO "R [0x%03X] %016llX "
"%016llX %016llX %016llX %016llX %p", i,
- le64_to_cpu(u1->a),
- le64_to_cpu(u1->b),
- le64_to_cpu(u1->c),
- le64_to_cpu(u1->d),
- (u64)buffer_info->dma,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
buffer_info->skb);
if (netif_msg_pktdata(adapter))
@@ -402,9 +407,11 @@ rx_ring_summary:
buffer_info = &rx_ring->buffer_info[i];
u0 = (struct my_u0 *)rx_desc;
printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
- "%016llX %p",
- i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
- (u64)buffer_info->dma, buffer_info->skb);
+ "%016llX %p", i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
if (i == rx_ring->next_to_use)
printk(KERN_CONT " NTU\n");
else if (i == rx_ring->next_to_clean)
@@ -2723,6 +2730,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
e1e_wphy(hw, 22, phy_data);
}
+ /* Workaround Si errata on 82579 - configure jumbo frame flow */
+ if (hw->mac.type == e1000_pch2lan) {
+ s32 ret_val;
+
+ if (rctl & E1000_RCTL_LPE)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+ }
+
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
@@ -2759,7 +2776,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
* per packet.
*/
pages = PAGE_USE_COUNT(adapter->netdev->mtu);
- if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
+ if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
(PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
adapter->rx_ps_pages = pages;
else
@@ -3118,7 +3135,27 @@ void e1000e_reset(struct e1000_adapter *adapter)
* with ERT support assuming ERT set to E1000_ERT_2048), or
* - the full Rx FIFO size minus one full frame
*/
- if (hw->mac.type == e1000_pchlan) {
+ if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+ fc->pause_time = 0xFFFF;
+ else
+ fc->pause_time = E1000_FC_PAUSE_TIME;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
+
+ switch (hw->mac.type) {
+ default:
+ if ((adapter->flags & FLAG_HAS_ERT) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN))
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - (E1000_ERT_2048 << 3)));
+ else
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - adapter->max_frame_size));
+
+ fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+ fc->low_water = fc->high_water - 8;
+ break;
+ case e1000_pchlan:
/*
* Workaround PCH LOM adapter hangs with certain network
* loads. If hangs persist, try disabling Tx flow control.
@@ -3131,26 +3168,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->low_water = 0x3000;
}
fc->refresh_time = 0x1000;
- } else {
- if ((adapter->flags & FLAG_HAS_ERT) &&
- (adapter->netdev->mtu > ETH_DATA_LEN))
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - (E1000_ERT_2048 << 3)));
- else
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - adapter->max_frame_size));
-
- fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
- fc->low_water = fc->high_water - 8;
+ break;
+ case e1000_pch2lan:
+ fc->high_water = 0x05C20;
+ fc->low_water = 0x05048;
+ fc->pause_time = 0x0650;
+ fc->refresh_time = 0x0400;
+ break;
}
- if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
- fc->pause_time = 0xFFFF;
- else
- fc->pause_time = E1000_FC_PAUSE_TIME;
- fc->send_xon = 1;
- fc->current_mode = fc->requested_mode;
-
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
@@ -3162,8 +3188,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000_get_hw_control(adapter);
ew32(WUC, 0);
- if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
- e1e_wphy(&adapter->hw, BM_WUC, 0);
if (mac->ops.init_hw(hw))
e_err("Hardware Error\n");
@@ -3419,13 +3443,18 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
/* disable SERR in case the MSI write causes a master abort */
pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
- pci_write_config_word(adapter->pdev, PCI_COMMAND,
- pci_cmd & ~PCI_COMMAND_SERR);
+ if (pci_cmd & PCI_COMMAND_SERR)
+ pci_write_config_word(adapter->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
err = e1000_test_msi_interrupt(adapter);
- /* restore previous setting of command word */
- pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+ /* re-enable SERR */
+ if (pci_cmd & PCI_COMMAND_SERR) {
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_SERR;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+ }
/* success ! */
if (!err)
@@ -3672,6 +3701,110 @@ static void e1000_update_phy_info(unsigned long data)
}
/**
+ * e1000e_update_phy_stats - Update the PHY statistics counters
+ * @adapter: board private structure
+ **/
+static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val;
+ u16 phy_data;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+
+ hw->phy.addr = 1;
+
+#define HV_PHY_STATS_PAGE 778
+ /*
+ * A page set is expensive so check if already on desired page.
+ * If not, set to the page with the PHY status registers.
+ */
+ ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ &phy_data);
+ if (ret_val)
+ goto release;
+ if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (HV_PHY_STATS_PAGE <<
+ IGP_PAGE_SHIFT));
+ if (ret_val)
+ goto release;
+ }
+
+ /* Read/clear the upper 16-bit registers and read/accumulate lower */
+
+ /* Single Collision Count */
+ e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.scc += phy_data;
+
+ /* Excessive Collision Count */
+ e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.ecol += phy_data;
+
+ /* Multiple Collision Count */
+ e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.mcc += phy_data;
+
+ /* Late Collision Count */
+ e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_LATECOL_LOWER &
+ MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.latecol += phy_data;
+
+ /* Collision Count - also used for adaptive IFS */
+ e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ hw->mac.collision_delta = phy_data;
+
+ /* Defer Count */
+ e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.dc += phy_data;
+
+ /* Transmit with no CRS */
+ e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.tncrs += phy_data;
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+/**
* e1000e_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
@@ -3680,7 +3813,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- u16 phy_data;
/*
* Prevent stats update while adapter is being reset, or if the pci
@@ -3700,34 +3832,27 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.roc += er32(ROC);
adapter->stats.mpc += er32(MPC);
- if ((hw->phy.type == e1000_phy_82578) ||
- (hw->phy.type == e1000_phy_82577)) {
- e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
- adapter->stats.scc += phy_data;
-
- e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
- adapter->stats.ecol += phy_data;
-
- e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
- adapter->stats.mcc += phy_data;
-
- e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
- adapter->stats.latecol += phy_data;
-
- e1e_rphy(hw, HV_DC_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
- adapter->stats.dc += phy_data;
- } else {
- adapter->stats.scc += er32(SCC);
- adapter->stats.ecol += er32(ECOL);
- adapter->stats.mcc += er32(MCC);
- adapter->stats.latecol += er32(LATECOL);
- adapter->stats.dc += er32(DC);
+
+ /* Half-duplex statistics */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
+ e1000e_update_phy_stats(adapter);
+ } else {
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+
+ hw->mac.collision_delta = er32(COLC);
+
+ if ((hw->mac.type != e1000_82574) &&
+ (hw->mac.type != e1000_82583))
+ adapter->stats.tncrs += er32(TNCRS);
+ }
+ adapter->stats.colc += hw->mac.collision_delta;
}
+
adapter->stats.xonrxc += er32(XONRXC);
adapter->stats.xontxc += er32(XONTXC);
adapter->stats.xoffrxc += er32(XOFFRXC);
@@ -3745,28 +3870,9 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
hw->mac.tx_packet_delta = er32(TPT);
adapter->stats.tpt += hw->mac.tx_packet_delta;
- if ((hw->phy.type == e1000_phy_82578) ||
- (hw->phy.type == e1000_phy_82577)) {
- e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
- hw->mac.collision_delta = phy_data;
- } else {
- hw->mac.collision_delta = er32(COLC);
- }
- adapter->stats.colc += hw->mac.collision_delta;
adapter->stats.algnerrc += er32(ALGNERRC);
adapter->stats.rxerrc += er32(RXERRC);
- if ((hw->phy.type == e1000_phy_82578) ||
- (hw->phy.type == e1000_phy_82577)) {
- e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
- adapter->stats.tncrs += phy_data;
- } else {
- if ((hw->mac.type != e1000_82574) &&
- (hw->mac.type != e1000_82583))
- adapter->stats.tncrs += er32(TNCRS);
- }
adapter->stats.cexterr += er32(CEXTERR);
adapter->stats.tsctc += er32(TSCTC);
adapter->stats.tsctfc += er32(TSCTFC);
@@ -3865,7 +3971,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
}
-bool e1000e_has_link(struct e1000_adapter *adapter)
+static bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = 0;
@@ -4841,14 +4947,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
int retval = 0;
/* copy MAC RARs to PHY RARs */
- for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
- mac_reg = er32(RAL(i));
- e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
- e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
- mac_reg = er32(RAH(i));
- e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
- e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
- }
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
/* copy MAC MTA to PHY MTA */
for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
@@ -5899,6 +5998,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
@@ -5935,7 +6037,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
+ pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
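
The reworked e1000e_reset() above sizes flow-control thresholds (in the default case) as the smaller of 90% of the packet buffer and the buffer minus one full frame, kept to the 8-byte granularity of the FCRTH field, with the low watermark one granule below. A hedged sketch of that arithmetic; pba is in KB as in the driver, and plain 8-byte rounding stands in for the real E1000_FCRTH_RTH mask.

#include <stdint.h>
#include <stdio.h>

struct fc_thresholds {
	uint32_t high_water;
	uint32_t low_water;
};

/* Default-case watermark math from the patched e1000e_reset():
 * min(90% of the packet buffer, buffer minus one max frame). */
static struct fc_thresholds fc_watermarks(uint32_t pba_kb, uint32_t max_frame)
{
	uint32_t fifo = pba_kb << 10;		/* bytes */
	uint32_t hwm  = fifo * 9 / 10;

	if (fifo - max_frame < hwm)
		hwm = fifo - max_frame;

	struct fc_thresholds fc = {
		.high_water = hwm & ~7u,	/* 8-byte granularity */
	};
	fc.low_water = fc.high_water - 8;
	return fc;
}

int main(void)
{
	struct fc_thresholds fc = fc_watermarks(26, 1522);

	printf("high=%u low=%u\n", fc.high_water, fc.low_water);
	return 0;
}
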
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a150e48..34aeec1 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index b4ac82d..3d3dc0c 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -2319,6 +2319,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
case I82577_E_PHY_ID:
phy_type = e1000_phy_82577;
break;
+ case I82579_E_PHY_ID:
+ phy_type = e1000_phy_82579;
+ break;
default:
phy_type = e1000_phy_unknown;
break;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 0630980..0060e42 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0103"
+#define DRV_VERSION "EHEA_0105"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f547894..3beba70 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -335,7 +335,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
memset(stats, 0, sizeof(*stats));
- cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
+ cb2 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb2) {
ehea_error("no mem for cb2");
goto out;
@@ -867,6 +867,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
ehea_reset_cq_ep(pr->send_cq);
ehea_reset_cq_n1(pr->recv_cq);
ehea_reset_cq_n1(pr->send_cq);
+ rmb();
cqe = ehea_poll_rq1(pr->qp, &wqe_index);
cqe_skb = ehea_poll_cq(pr->send_cq);
@@ -2859,6 +2860,7 @@ static void ehea_reset_port(struct work_struct *work)
container_of(work, struct ehea_port, reset_task);
struct net_device *dev = port->netdev;
+ mutex_lock(&dlpar_mem_lock);
port->resets++;
mutex_lock(&port->port_lock);
netif_stop_queue(dev);
@@ -2881,6 +2883,7 @@ static void ehea_reset_port(struct work_struct *work)
netif_wake_queue(dev);
out:
mutex_unlock(&port->port_lock);
+ mutex_unlock(&dlpar_mem_lock);
}
static void ehea_rereg_mrs(struct work_struct *work)
@@ -3542,10 +3545,7 @@ static int ehea_mem_notifier(struct notifier_block *nb,
int ret = NOTIFY_BAD;
struct memory_notify *arg = data;
- if (!mutex_trylock(&dlpar_mem_lock)) {
- ehea_info("ehea_mem_notifier must not be called parallelized");
- goto out;
- }
+ mutex_lock(&dlpar_mem_lock);
switch (action) {
case MEM_CANCEL_OFFLINE:
@@ -3574,7 +3574,6 @@ static int ehea_mem_notifier(struct notifier_block *nb,
out_unlock:
mutex_unlock(&dlpar_mem_lock);
-out:
return ret;
}
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
index 1eb289f..d6dd1b4 100644
--- a/drivers/net/enic/cq_desc.h
+++ b/drivers/net/enic/cq_desc.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 337d194..c2c0680 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -73,7 +73,16 @@ struct cq_enet_rq_desc {
#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
-#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8
#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
@@ -96,7 +105,7 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
+ u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
@@ -136,7 +145,10 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
*vlan_stripped = (bytes_written_flags &
CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
- *vlan = le16_to_cpu(desc->vlan);
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
if (*fcoe) {
*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
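
The new CQ_ENET_RQ_DESC_VLAN_TCI_* masks above split the 16-bit Tag Control Information word into a 3-bit user priority, a 1-bit CFI, and a 12-bit VLAN ID. A small standalone decode using the same bit layout; the sample TCI value is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Bit layout from the CQ_ENET_RQ_DESC_VLAN_TCI_* definitions:
 * TCI(16) = user_priority(3) | cfi(1) | vlan_id(12). */
#define TCI_VLAN_MASK		0x0FFFu
#define TCI_CFI_MASK		(1u << 12)
#define TCI_USER_PRIO_SHIFT	13
#define TCI_USER_PRIO_MASK	0x7u

static void decode_tci(uint16_t tci)
{
	unsigned int vid  = tci & TCI_VLAN_MASK;
	unsigned int cfi  = (tci & TCI_CFI_MASK) ? 1 : 0;
	unsigned int prio = (tci >> TCI_USER_PRIO_SHIFT) & TCI_USER_PRIO_MASK;

	printf("vid=%u cfi=%u prio=%u\n", vid, cfi, prio);
}

int main(void)
{
	decode_tci(0xA064);	/* prio 5, cfi 0, vid 100 */
	return 0;
}
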
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 45e86d1..f239aa8 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -20,8 +20,6 @@
#ifndef _ENIC_H_
#define _ENIC_H_
-#include <linux/inet_lro.h>
-
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
@@ -34,12 +32,8 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.3.1.1-pp"
-#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
-#define PFX DRV_NAME ": "
-
-#define ENIC_LRO_MAX_DESC 8
-#define ENIC_LRO_MAX_AGGR 64
+#define DRV_VERSION "1.4.1.1"
+#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -116,6 +110,8 @@ struct enic {
spinlock_t wq_lock[ENIC_WQ_MAX];
unsigned int wq_count;
struct vlan_group *vlan_group;
+ u16 loop_enable;
+ u16 loop_tag;
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
@@ -124,8 +120,6 @@ struct enic {
u64 rq_truncated_pkts;
u64 rq_bad_fcs;
struct napi_struct napi;
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
/* interrupt resource cache line section */
____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
@@ -137,4 +131,9 @@ struct enic {
unsigned int cq_count;
};
+static inline struct device *enic_get_dev(struct enic *enic)
+{
+ return &(enic->pdev->dev);
+}
+
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index bc7d6b9..77a7f87 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -29,12 +29,12 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
-#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/rtnetlink.h>
#include <net/ip6_checksum.h>
#include "cq_enet_desc.h"
@@ -145,15 +145,25 @@ static int enic_get_settings(struct net_device *netdev,
return 0;
}
+static int enic_dev_fw_info(struct enic *enic,
+ struct vnic_devcmd_fw_info **fw_info)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_fw_info(enic->vdev, fw_info);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
static void enic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_devcmd_fw_info *fw_info;
- spin_lock(&enic->devcmd_lock);
- vnic_dev_fw_info(enic->vdev, &fw_info);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_fw_info(enic, &fw_info);
strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -191,6 +201,17 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_stats_dump(enic->vdev, vstats);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
static void enic_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
@@ -198,9 +219,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct vnic_stats *vstats;
unsigned int i;
- spin_lock(&enic->devcmd_lock);
- vnic_dev_stats_dump(enic->vdev, &vstats);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_stats_dump(enic, &vstats);
for (i = 0; i < enic_n_tx_stats; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
@@ -346,7 +365,6 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
};
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
@@ -399,54 +417,55 @@ static void enic_log_q_error(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
error_status = vnic_wq_error_status(&enic->wq[i]);
if (error_status)
- printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
- enic->netdev->name, i, error_status);
+ netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
+ i, error_status);
}
for (i = 0; i < enic->rq_count; i++) {
error_status = vnic_rq_error_status(&enic->rq[i]);
if (error_status)
- printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
- enic->netdev->name, i, error_status);
+ netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
+ i, error_status);
}
}
-static void enic_link_check(struct enic *enic)
+static void enic_msglvl_check(struct enic *enic)
{
- int link_status = vnic_dev_link_status(enic->vdev);
- int carrier_ok = netif_carrier_ok(enic->netdev);
+ u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
- if (link_status && !carrier_ok) {
- printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
- netif_carrier_on(enic->netdev);
- } else if (!link_status && carrier_ok) {
- printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
- netif_carrier_off(enic->netdev);
+ if (msg_enable != enic->msg_enable) {
+ netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
+ enic->msg_enable, msg_enable);
+ enic->msg_enable = msg_enable;
}
}
static void enic_mtu_check(struct enic *enic)
{
u32 mtu = vnic_dev_mtu(enic->vdev);
+ struct net_device *netdev = enic->netdev;
if (mtu && mtu != enic->port_mtu) {
enic->port_mtu = mtu;
- if (mtu < enic->netdev->mtu)
- printk(KERN_WARNING PFX
- "%s: interface MTU (%d) set higher "
+ if (mtu < netdev->mtu)
+ netdev_warn(netdev,
+ "interface MTU (%d) set higher "
"than switch port MTU (%d)\n",
- enic->netdev->name, enic->netdev->mtu, mtu);
+ netdev->mtu, mtu);
}
}
-static void enic_msglvl_check(struct enic *enic)
+static void enic_link_check(struct enic *enic)
{
- u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
+ int link_status = vnic_dev_link_status(enic->vdev);
+ int carrier_ok = netif_carrier_ok(enic->netdev);
- if (msg_enable != enic->msg_enable) {
- printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
- enic->netdev->name, enic->msg_enable, msg_enable);
- enic->msg_enable = msg_enable;
+ if (link_status && !carrier_ok) {
+ netdev_info(enic->netdev, "Link UP\n");
+ netif_carrier_on(enic->netdev);
+ } else if (!link_status && carrier_ok) {
+ netdev_info(enic->netdev, "Link DOWN\n");
+ netif_carrier_off(enic->netdev);
}
}
@@ -574,7 +593,7 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
static inline void enic_queue_wq_skb_cont(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb,
- unsigned int len_left)
+ unsigned int len_left, int loopback)
{
skb_frag_t *frag;
@@ -586,13 +605,14 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE),
frag->size,
- (len_left == 0)); /* EOP? */
+ (len_left == 0), /* EOP? */
+ loopback);
}
}
static inline void enic_queue_wq_skb_vlan(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb,
- int vlan_tag_insert, unsigned int vlan_tag)
+ int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
@@ -608,15 +628,15 @@ static inline void enic_queue_wq_skb_vlan(struct enic *enic,
head_len, PCI_DMA_TODEVICE),
head_len,
vlan_tag_insert, vlan_tag,
- eop);
+ eop, loopback);
if (!eop)
- enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+ enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb,
- int vlan_tag_insert, unsigned int vlan_tag)
+ int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
@@ -636,15 +656,15 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
csum_offset,
hdr_len,
vlan_tag_insert, vlan_tag,
- eop);
+ eop, loopback);
if (!eop)
- enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+ enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_tso(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
- int vlan_tag_insert, unsigned int vlan_tag)
+ int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
unsigned int frag_len_left = skb_headlen(skb);
unsigned int len_left = skb->len - frag_len_left;
@@ -681,7 +701,7 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
len,
mss, hdr_len,
vlan_tag_insert, vlan_tag,
- eop && (len == frag_len_left));
+ eop && (len == frag_len_left), loopback);
frag_len_left -= len;
offset += len;
}
@@ -707,7 +727,8 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
dma_addr,
len,
(len_left == 0) &&
- (len == frag_len_left)); /* EOP? */
+ (len == frag_len_left), /* EOP? */
+ loopback);
frag_len_left -= len;
offset += len;
}
@@ -720,22 +741,26 @@ static inline void enic_queue_wq_skb(struct enic *enic,
unsigned int mss = skb_shinfo(skb)->gso_size;
unsigned int vlan_tag = 0;
int vlan_tag_insert = 0;
+ int loopback = 0;
if (enic->vlan_group && vlan_tx_tag_present(skb)) {
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
vlan_tag = vlan_tx_tag_get(skb);
+ } else if (enic->loop_enable) {
+ vlan_tag = enic->loop_tag;
+ loopback = 1;
}
if (mss)
enic_queue_wq_skb_tso(enic, wq, skb, mss,
- vlan_tag_insert, vlan_tag);
+ vlan_tag_insert, vlan_tag, loopback);
else if (skb->ip_summed == CHECKSUM_PARTIAL)
enic_queue_wq_skb_csum_l4(enic, wq, skb,
- vlan_tag_insert, vlan_tag);
+ vlan_tag_insert, vlan_tag, loopback);
else
enic_queue_wq_skb_vlan(enic, wq, skb,
- vlan_tag_insert, vlan_tag);
+ vlan_tag_insert, vlan_tag, loopback);
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
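
enic_queue_wq_skb() above is where the new loop_enable/loop_tag fields come
into play: a frame either carries the VLAN tag handed down by the 802.1Q
layer, or, on loopback-capable vNICs, is queued with the adapter's loop_tag
and the descriptor loopback bit set (the tag is written but not inserted).
A standalone sketch of just that selection, with illustrative names:

	#include <stdio.h>

	struct tx_ctx {
		int has_vlan;		/* vlan_tx_tag_present(skb) */
		unsigned int skb_vlan;	/* vlan_tx_tag_get(skb) */
		int loop_enable;	/* enic->loop_enable */
		unsigned int loop_tag;	/* enic->loop_tag */
	};

	static void pick_tag(const struct tx_ctx *c, int *insert,
			     unsigned int *tag, int *loopback)
	{
		*insert = 0;
		*tag = 0;
		*loopback = 0;

		if (c->has_vlan) {		/* trunking driver supplied a tag */
			*insert = 1;
			*tag = c->skb_vlan;
		} else if (c->loop_enable) {	/* loopback vNIC: use loop_tag */
			*tag = c->loop_tag;
			*loopback = 1;
		}
	}

	int main(void)
	{
		struct tx_ctx c = { .loop_enable = 1, .loop_tag = 0x0123 };
		int ins, loop;
		unsigned int tag;

		pick_tag(&c, &ins, &tag, &loop);
		printf("insert=%d tag=0x%04x loopback=%d\n", ins, tag, loop);
		return 0;
	}

The chosen values are then threaded through enic_queue_wq_skb_tso/_csum_l4/
_vlan down to enic_queue_wq_desc_ex(), which is why every helper in
enic_res.h grows a trailing loopback argument.
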
@@ -769,8 +794,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
netif_stop_queue(netdev);
/* This is a hard error, log it */
- printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
- "queue awake!\n", netdev->name);
+ netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
spin_unlock_irqrestore(&enic->wq_lock[0], flags);
return NETDEV_TX_BUSY;
}
@@ -792,9 +816,7 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
struct net_device_stats *net_stats = &netdev->stats;
struct vnic_stats *stats;
- spin_lock(&enic->devcmd_lock);
- vnic_dev_stats_dump(enic->vdev, &stats);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_stats_dump(enic, &stats);
net_stats->tx_packets = stats->tx.tx_frames_ok;
net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -812,9 +834,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
return net_stats;
}
-static void enic_reset_mcaddrs(struct enic *enic)
+static void enic_reset_multicast_list(struct enic *enic)
{
enic->mc_count = 0;
+ enic->flags = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
@@ -891,6 +914,41 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
return -EOPNOTSUPP;
}
+static int enic_dev_packet_filter(struct enic *enic, int directed,
+ int multicast, int broadcast, int promisc, int allmulti)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
@@ -910,11 +968,9 @@ static void enic_set_multicast_list(struct net_device *netdev)
if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
- spin_lock(&enic->devcmd_lock);
-
if (enic->flags != flags) {
enic->flags = flags;
- vnic_dev_packet_filter(enic->vdev, directed,
+ enic_dev_packet_filter(enic, directed,
multicast, broadcast, promisc, allmulti);
}
@@ -937,7 +993,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
mc_addr[j]) == 0)
break;
if (j == mc_count)
- enic_del_multicast_addr(enic, enic->mc_addr[i]);
+ enic_dev_del_multicast_addr(enic, enic->mc_addr[i]);
}
for (i = 0; i < mc_count; i++) {
@@ -946,7 +1002,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
enic->mc_addr[j]) == 0)
break;
if (j == enic->mc_count)
- enic_add_multicast_addr(enic, mc_addr[i]);
+ enic_dev_add_multicast_addr(enic, mc_addr[i]);
}
/* Save the list to compare against next time
@@ -956,8 +1012,6 @@ static void enic_set_multicast_list(struct net_device *netdev)
memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
enic->mc_count = mc_count;
-
- spin_unlock(&enic->devcmd_lock);
}
/* rtnl lock is held */
@@ -1226,7 +1280,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
struct enic *enic = vnic_dev_priv(rq->vdev);
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
- unsigned int len = netdev->mtu + ETH_HLEN;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
unsigned int os_buf_index = 0;
dma_addr_t dma_addr;
@@ -1263,12 +1317,24 @@ static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
return 0;
}
+static int enic_dev_hw_version(struct enic *enic,
+ enum vnic_dev_hw_version *hw_ver)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_hw_version(enic->vdev, hw_ver);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
static int enic_set_rq_alloc_buf(struct enic *enic)
{
enum vnic_dev_hw_version hw_ver;
int err;
- err = vnic_dev_hw_version(enic->vdev, &hw_ver);
+ err = enic_dev_hw_version(enic, &hw_ver);
if (err)
return err;
@@ -1287,51 +1353,6 @@ static int enic_set_rq_alloc_buf(struct enic *enic)
return 0;
}
-static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- struct cq_enet_rq_desc *cq_desc = priv;
- unsigned int ip_len;
- struct iphdr *iph;
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan, checksum;
- u32 rss_hash;
-
- cq_enet_rq_desc_dec(cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- if (!(ipv4 && tcp && !ipv4_fragment))
- return -1;
-
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
-
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
-
- /* check if ip header and tcp header are complete */
- if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
- return -1;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *tcph = tcp_hdr(skb);
- *iphdr = iph;
-
- return 0;
-}
-
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -1345,7 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan, checksum;
+ u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
u32 rss_hash;
if (skipped)
@@ -1360,7 +1381,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
&type, &color, &q_number, &completed_index,
&ingress_port, &fcoe, &eop, &sop, &rss_type,
&csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan, &checksum,
+ &packet_error, &vlan_stripped, &vlan_tci, &checksum,
&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
@@ -1395,20 +1416,20 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->dev = netdev;
- if (enic->vlan_group && vlan_stripped) {
+ if (enic->vlan_group && vlan_stripped &&
+ (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {
- if ((netdev->features & NETIF_F_LRO) && ipv4)
- lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
- skb, enic->vlan_group,
- vlan, cq_desc);
+ if (netdev->features & NETIF_F_GRO)
+ vlan_gro_receive(&enic->napi, enic->vlan_group,
+ vlan_tci, skb);
else
vlan_hwaccel_receive_skb(skb,
- enic->vlan_group, vlan);
+ enic->vlan_group, vlan_tci);
} else {
- if ((netdev->features & NETIF_F_LRO) && ipv4)
- lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
+ if (netdev->features & NETIF_F_GRO)
+ napi_gro_receive(&enic->napi, skb);
else
netif_receive_skb(skb);
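
In the receive path above, GRO replaces the driver-private LRO manager, and a
stripped frame is only handed to the VLAN group when its TCI carries a
non-zero VID; a VID of zero (a priority-only tag) falls through to the plain
receive path, which fits with the IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN
mode this patch configures elsewhere. Assuming CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK
covers the low 12 VID bits of the TCI (an assumption, not shown in this hunk),
the check reduces to:

	#include <stdio.h>

	#define VID_MASK 0x0fff		/* assumed VID portion of the 802.1Q TCI */

	static int has_real_vid(unsigned short tci)
	{
		return (tci & VID_MASK) != 0;
	}

	int main(void)
	{
		printf("tci=0xe000 -> %d (priority tag only)\n", has_real_vid(0xe000));
		printf("tci=0x6064 -> %d (VID 100, PCP 3)\n", has_real_vid(0x6064));
		return 0;
	}
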
@@ -1438,7 +1459,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
static int enic_poll(struct napi_struct *napi, int budget)
{
struct enic *enic = container_of(napi, struct enic, napi);
- struct net_device *netdev = enic->netdev;
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = -1; /* no limit */
unsigned int work_done, rq_work_done, wq_work_done;
@@ -1478,12 +1498,9 @@ static int enic_poll(struct napi_struct *napi, int budget)
if (rq_work_done < rq_work_to_do) {
/* Some work done, but not enough to stay in polling,
- * flush all LROs and exit polling
+ * exit polling
*/
- if (netdev->features & NETIF_F_LRO)
- lro_flush_all(&enic->lro_mgr);
-
napi_complete(napi);
vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
}
@@ -1494,7 +1511,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
struct enic *enic = container_of(napi, struct enic, napi);
- struct net_device *netdev = enic->netdev;
unsigned int work_to_do = budget;
unsigned int work_done;
int err;
@@ -1528,12 +1544,9 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
if (work_done < work_to_do) {
/* Some work done, but not enough to stay in polling,
- * flush all LROs and exit polling
+ * exit polling
*/
- if (netdev->features & NETIF_F_LRO)
- lro_flush_all(&enic->lro_mgr);
-
napi_complete(napi);
vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
}
@@ -1655,7 +1668,7 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
-static int enic_notify_set(struct enic *enic)
+static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -1676,6 +1689,39 @@ static int enic_notify_set(struct enic *enic)
return err;
}
+static int enic_dev_notify_unset(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_notify_unset(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_enable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_disable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_disable(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
static void enic_notify_timer_start(struct enic *enic)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1697,16 +1743,14 @@ static int enic_open(struct net_device *netdev)
err = enic_request_intr(enic);
if (err) {
- printk(KERN_ERR PFX "%s: Unable to request irq.\n",
- netdev->name);
+ netdev_err(netdev, "Unable to request irq.\n");
return err;
}
- err = enic_notify_set(enic);
+ err = enic_dev_notify_set(enic);
if (err) {
- printk(KERN_ERR PFX
- "%s: Failed to alloc notify buffer, aborting.\n",
- netdev->name);
+ netdev_err(netdev,
+ "Failed to alloc notify buffer, aborting.\n");
goto err_out_free_intr;
}
@@ -1714,9 +1758,7 @@ static int enic_open(struct net_device *netdev)
vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
/* Need at least one buffer on ring to get going */
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
- printk(KERN_ERR PFX
- "%s: Unable to alloc receive buffers.\n",
- netdev->name);
+ netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
goto err_out_notify_unset;
}
@@ -1732,9 +1774,7 @@ static int enic_open(struct net_device *netdev)
netif_wake_queue(netdev);
napi_enable(&enic->napi);
- spin_lock(&enic->devcmd_lock);
- vnic_dev_enable(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_enable(enic);
for (i = 0; i < enic->intr_count; i++)
vnic_intr_unmask(&enic->intr[i]);
@@ -1744,9 +1784,7 @@ static int enic_open(struct net_device *netdev)
return 0;
err_out_notify_unset:
- spin_lock(&enic->devcmd_lock);
- vnic_dev_notify_unset(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_notify_unset(enic);
err_out_free_intr:
enic_free_intr(enic);
@@ -1760,20 +1798,19 @@ static int enic_stop(struct net_device *netdev)
unsigned int i;
int err;
- for (i = 0; i < enic->intr_count; i++)
+ for (i = 0; i < enic->intr_count; i++) {
vnic_intr_mask(&enic->intr[i]);
+ (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+ }
enic_synchronize_irqs(enic);
del_timer_sync(&enic->notify_timer);
- spin_lock(&enic->devcmd_lock);
- vnic_dev_disable(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_disable(enic);
napi_disable(&enic->napi);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
-
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
@@ -1787,9 +1824,7 @@ static int enic_stop(struct net_device *netdev)
return err;
}
- spin_lock(&enic->devcmd_lock);
- vnic_dev_notify_unset(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_notify_unset(enic);
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
@@ -1818,10 +1853,9 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
if (netdev->mtu > enic->port_mtu)
- printk(KERN_WARNING PFX
- "%s: interface MTU (%d) set higher "
- "than port MTU (%d)\n",
- netdev->name, netdev->mtu, enic->port_mtu);
+ netdev_warn(netdev,
+ "interface MTU (%d) set higher than port MTU (%d)\n",
+ netdev->mtu, enic->port_mtu);
if (running)
enic_open(netdev);
@@ -1894,21 +1928,21 @@ static int enic_dev_open(struct enic *enic)
err = enic_dev_wait(enic->vdev, vnic_dev_open,
vnic_dev_open_done, 0);
if (err)
- printk(KERN_ERR PFX
- "vNIC device open failed, err %d.\n", err);
+ dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
+ err);
return err;
}
-static int enic_dev_soft_reset(struct enic *enic)
+static int enic_dev_hang_reset(struct enic *enic)
{
int err;
- err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
- vnic_dev_soft_reset_done, 0);
+ err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
+ vnic_dev_hang_reset_done, 0);
if (err)
- printk(KERN_ERR PFX
- "vNIC soft reset failed, err %d.\n", err);
+ netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
+ err);
return err;
}
@@ -1922,15 +1956,43 @@ static int enic_set_niccfg(struct enic *enic)
const u8 rss_enable = 0;
const u8 tso_ipid_split_en = 0;
const u8 ig_vlan_strip_en = 1;
+ int err;
/* Enable VLAN tag stripping. RSS not enabled (yet).
*/
- return enic_set_nic_cfg(enic,
+ spin_lock(&enic->devcmd_lock);
+ err = enic_set_nic_cfg(enic,
rss_default_cpu, rss_hash_type,
rss_hash_bits, rss_base_cpu,
rss_enable, tso_ipid_split_en,
ig_vlan_strip_en);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_hang_notify(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_hang_notify(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
static void enic_reset(struct work_struct *work)
@@ -1942,16 +2004,13 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->devcmd_lock);
- vnic_dev_hang_notify(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
+ enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
- enic_dev_soft_reset(enic);
- vnic_dev_init(enic->vdev, 0);
- enic_reset_mcaddrs(enic);
+ enic_dev_hang_reset(enic);
+ enic_reset_multicast_list(enic);
enic_init_vnic_resources(enic);
enic_set_niccfg(enic);
+ enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
rtnl_unlock();
@@ -2087,8 +2146,8 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_start_xmit = enic_hard_start_xmit,
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = enic_set_multicast_list,
.ndo_set_mac_address = enic_set_mac_address,
+ .ndo_set_multicast_list = enic_set_multicast_list,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2106,8 +2165,20 @@ void enic_dev_deinit(struct enic *enic)
enic_clear_intr_mode(enic);
}
+static int enic_dev_stats_clear(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_stats_clear(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
int enic_dev_init(struct enic *enic)
{
+ struct device *dev = enic_get_dev(enic);
struct net_device *netdev = enic->netdev;
int err;
@@ -2116,8 +2187,7 @@ int enic_dev_init(struct enic *enic)
err = enic_get_vnic_config(enic);
if (err) {
- printk(KERN_ERR PFX
- "Get vNIC configuration failed, aborting.\n");
+ dev_err(dev, "Get vNIC configuration failed, aborting\n");
return err;
}
@@ -2132,9 +2202,8 @@ int enic_dev_init(struct enic *enic)
err = enic_set_intr_mode(enic);
if (err) {
- printk(KERN_ERR PFX
- "Failed to set intr mode based on resource "
- "counts and system capabilities, aborting.\n");
+ dev_err(dev, "Failed to set intr mode based on resource "
+ "counts and system capabilities, aborting\n");
return err;
}
@@ -2143,24 +2212,32 @@ int enic_dev_init(struct enic *enic)
err = enic_alloc_vnic_resources(enic);
if (err) {
- printk(KERN_ERR PFX
- "Failed to alloc vNIC resources, aborting.\n");
+ dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
goto err_out_free_vnic_resources;
}
enic_init_vnic_resources(enic);
+ /* Clear LIF stats
+ */
+ enic_dev_stats_clear(enic);
+
err = enic_set_rq_alloc_buf(enic);
if (err) {
- printk(KERN_ERR PFX
- "Failed to set RQ buffer allocator, aborting.\n");
+ dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
goto err_out_free_vnic_resources;
}
err = enic_set_niccfg(enic);
if (err) {
- printk(KERN_ERR PFX
- "Failed to config nic, aborting.\n");
+ dev_err(dev, "Failed to config nic, aborting\n");
+ goto err_out_free_vnic_resources;
+ }
+
+ err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set ingress vlan rewrite mode, aborting.\n");
goto err_out_free_vnic_resources;
}
@@ -2194,6 +2271,7 @@ static void enic_iounmap(struct enic *enic)
static int __devinit enic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct device *dev = &pdev->dev;
struct net_device *netdev;
struct enic *enic;
int using_dac = 0;
@@ -2206,7 +2284,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
netdev = alloc_etherdev(sizeof(struct enic));
if (!netdev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
return -ENOMEM;
}
@@ -2221,17 +2299,15 @@ static int __devinit enic_probe(struct pci_dev *pdev,
/* Setup PCI resources
*/
- err = pci_enable_device(pdev);
+ err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR PFX
- "Cannot enable PCI device, aborting.\n");
+ dev_err(dev, "Cannot enable PCI device, aborting\n");
goto err_out_free_netdev;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- printk(KERN_ERR PFX
- "Cannot request PCI regions, aborting.\n");
+ dev_err(dev, "Cannot request PCI regions, aborting\n");
goto err_out_disable_device;
}
@@ -2246,23 +2322,20 @@ static int __devinit enic_probe(struct pci_dev *pdev,
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR PFX
- "No usable DMA configuration, aborting.\n");
+ dev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_release_regions;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR PFX
- "Unable to obtain 32-bit DMA "
- "for consistent allocations, aborting.\n");
+ dev_err(dev, "Unable to obtain %u-bit DMA "
+ "for consistent allocations, aborting\n", 32);
goto err_out_release_regions;
}
} else {
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
if (err) {
- printk(KERN_ERR PFX
- "Unable to obtain 40-bit DMA "
- "for consistent allocations, aborting.\n");
+ dev_err(dev, "Unable to obtain %u-bit DMA "
+ "for consistent allocations, aborting\n", 40);
goto err_out_release_regions;
}
using_dac = 1;
@@ -2277,8 +2350,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->bar[i].len = pci_resource_len(pdev, i);
enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
if (!enic->bar[i].vaddr) {
- printk(KERN_ERR PFX
- "Cannot memory-map BAR %d, aborting.\n", i);
+ dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
err = -ENODEV;
goto err_out_iounmap;
}
@@ -2291,8 +2363,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
ARRAY_SIZE(enic->bar));
if (!enic->vdev) {
- printk(KERN_ERR PFX
- "vNIC registration failed, aborting.\n");
+ dev_err(dev, "vNIC registration failed, aborting\n");
err = -ENODEV;
goto err_out_iounmap;
}
@@ -2302,8 +2373,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
err = enic_dev_open(enic);
if (err) {
- printk(KERN_ERR PFX
- "vNIC dev open failed, aborting.\n");
+ dev_err(dev, "vNIC dev open failed, aborting\n");
goto err_out_vnic_unregister;
}
@@ -2317,23 +2387,31 @@ static int __devinit enic_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
+ /* Do not call dev_init for a dynamic vnic.
+ * For a dynamic vnic, init_prov_info will be
+ * called later by an upper layer.
+ */
+
if (!enic_is_dynamic(enic)) {
err = vnic_dev_init(enic->vdev, 0);
if (err) {
- printk(KERN_ERR PFX
- "vNIC dev init failed, aborting.\n");
+ dev_err(dev, "vNIC dev init failed, aborting\n");
goto err_out_dev_close;
}
}
+ /* Setup devcmd lock
+ */
+
+ spin_lock_init(&enic->devcmd_lock);
+
err = enic_dev_init(enic);
if (err) {
- printk(KERN_ERR PFX
- "Device initialization failed, aborting.\n");
+ dev_err(dev, "Device initialization failed, aborting\n");
goto err_out_dev_close;
}
- /* Setup notification timer, HW reset task, and locks
+ /* Setup notification timer, HW reset task, and wq locks
*/
init_timer(&enic->notify_timer);
@@ -2345,8 +2423,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
for (i = 0; i < enic->wq_count; i++)
spin_lock_init(&enic->wq_lock[i]);
- spin_lock_init(&enic->devcmd_lock);
-
/* Register net device
*/
@@ -2355,8 +2431,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
err = enic_set_mac_addr(netdev, enic->mac_addr);
if (err) {
- printk(KERN_ERR PFX
- "Invalid MAC address, aborting.\n");
+ dev_err(dev, "Invalid MAC address, aborting\n");
goto err_out_dev_deinit;
}
@@ -2372,31 +2447,27 @@ static int __devinit enic_probe(struct pci_dev *pdev,
netdev->ethtool_ops = &enic_ethtool_ops;
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ if (ENIC_SETTING(enic, LOOP)) {
+ netdev->features &= ~NETIF_F_HW_VLAN_TX;
+ enic->loop_enable = 1;
+ enic->loop_tag = enic->config.loop_tag;
+ dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
+ }
if (ENIC_SETTING(enic, TXCSUM))
netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
if (ENIC_SETTING(enic, TSO))
netdev->features |= NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO_ECN;
if (ENIC_SETTING(enic, LRO))
- netdev->features |= NETIF_F_LRO;
+ netdev->features |= NETIF_F_GRO;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
- enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
- enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
- enic->lro_mgr.lro_arr = enic->lro_desc;
- enic->lro_mgr.get_skb_header = enic_get_skb_header;
- enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- enic->lro_mgr.dev = netdev;
- enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
- enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
err = register_netdev(netdev);
if (err) {
- printk(KERN_ERR PFX
- "Cannot register net device, aborting.\n");
+ dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
@@ -2450,7 +2521,7 @@ static struct pci_driver enic_driver = {
static int __init enic_init_module(void)
{
- printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
return pci_register_driver(&enic_driver);
}
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 9b18840..29ede8a 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -46,7 +46,8 @@ int enic_get_vnic_config(struct enic *enic)
err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
if (err) {
- printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
+ dev_err(enic_get_dev(enic),
+ "Error getting MAC addr, %d\n", err);
return err;
}
@@ -56,7 +57,7 @@ int enic_get_vnic_config(struct enic *enic)
offsetof(struct vnic_enet_config, m), \
sizeof(c->m), &c->m); \
if (err) { \
- printk(KERN_ERR PFX \
+ dev_err(enic_get_dev(enic), \
"Error getting %s, %d\n", #m, err); \
return err; \
} \
@@ -69,6 +70,7 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_type);
GET_CONFIG(intr_mode);
GET_CONFIG(intr_timer_usec);
+ GET_CONFIG(loop_tag);
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -92,10 +94,10 @@ int enic_get_vnic_config(struct enic *enic)
INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
c->intr_timer_usec);
- printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
+ dev_info(enic_get_dev(enic), "vNIC MAC addr %pM wq/rq %d/%d\n",
enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
- printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
- "intr timer %d usec\n",
+ dev_info(enic_get_dev(enic), "vNIC mtu %d csum tx/rx %d/%d "
+ "tso/lro %d/%d intr timer %d usec\n",
c->mtu, ENIC_SETTING(enic, TXCSUM),
ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
ENIC_SETTING(enic, LRO), c->intr_timer_usec);
@@ -103,17 +105,7 @@ int enic_get_vnic_config(struct enic *enic)
return 0;
}
-void enic_add_multicast_addr(struct enic *enic, u8 *addr)
-{
- vnic_dev_add_addr(enic->vdev, addr);
-}
-
-void enic_del_multicast_addr(struct enic *enic, u8 *addr)
-{
- vnic_dev_del_addr(enic->vdev, addr);
-}
-
-void enic_add_vlan(struct enic *enic, u16 vlanid)
+int enic_add_vlan(struct enic *enic, u16 vlanid)
{
u64 a0 = vlanid, a1 = 0;
int wait = 1000;
@@ -121,10 +113,12 @@ void enic_add_vlan(struct enic *enic, u16 vlanid)
err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
if (err)
- printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
+ dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);
+
+ return err;
}
-void enic_del_vlan(struct enic *enic, u16 vlanid)
+int enic_del_vlan(struct enic *enic, u16 vlanid)
{
u64 a0 = vlanid, a1 = 0;
int wait = 1000;
@@ -132,7 +126,9 @@ void enic_del_vlan(struct enic *enic, u16 vlanid)
err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
if (err)
- printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
+ dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);
+
+ return err;
}
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
@@ -198,8 +194,8 @@ void enic_get_res_counts(struct enic *enic)
vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL),
ENIC_INTR_MAX);
- printk(KERN_INFO PFX "vNIC resources avail: "
- "wq %d rq %d cq %d intr %d\n",
+ dev_info(enic_get_dev(enic),
+ "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
enic->wq_count, enic->rq_count,
enic->cq_count, enic->intr_count);
}
@@ -304,11 +300,6 @@ void enic_init_vnic_resources(struct enic *enic)
enic->config.intr_timer_type,
mask_on_assertion);
}
-
- /* Clear LIF stats
- */
-
- vnic_dev_stats_clear(enic->vdev);
}
int enic_alloc_vnic_resources(struct enic *enic)
@@ -319,15 +310,14 @@ int enic_alloc_vnic_resources(struct enic *enic)
intr_mode = vnic_dev_get_intr_mode(enic->vdev);
- printk(KERN_INFO PFX "vNIC resources used: "
+ dev_info(enic_get_dev(enic), "vNIC resources used: "
"wq %d rq %d cq %d intr %d intr mode %s\n",
enic->wq_count, enic->rq_count,
enic->cq_count, enic->intr_count,
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
- "unknown"
- );
+ "unknown");
/* Allocate queue resources
*/
@@ -373,7 +363,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
enic->legacy_pba = vnic_dev_get_res(enic->vdev,
RES_TYPE_INTR_PBA_LEGACY, 0);
if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
- printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
+ dev_err(enic_get_dev(enic),
+ "Failed to hook legacy pba resource\n");
err = -ENODEV;
goto err_out_cleanup;
}
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 494664f..83bd172 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -43,7 +43,7 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int mss_or_csum_offset, unsigned int hdr_len,
int vlan_tag_insert, unsigned int vlan_tag,
- int offload_mode, int cq_entry, int sop, int eop)
+ int offload_mode, int cq_entry, int sop, int eop, int loopback)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
@@ -56,61 +56,62 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
0, /* fcoe_encap */
(u8)vlan_tag_insert,
(u16)vlan_tag,
- 0 /* loopback */);
+ (u8)loopback);
vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
}
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
- void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ int eop, int loopback)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
0, 0, 0, 0, 0,
- eop, 0 /* !SOP */, eop);
+ eop, 0 /* !SOP */, eop, loopback);
}
static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
- unsigned int vlan_tag, int eop)
+ unsigned int vlan_tag, int eop, int loopback)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
0, 0, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM,
- eop, 1 /* SOP */, eop);
+ eop, 1 /* SOP */, eop, loopback);
}
static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
int ip_csum, int tcpudp_csum, int vlan_tag_insert,
- unsigned int vlan_tag, int eop)
+ unsigned int vlan_tag, int eop, int loopback)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
0, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM,
- eop, 1 /* SOP */, eop);
+ eop, 1 /* SOP */, eop, loopback);
}
static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int csum_offset, unsigned int hdr_len,
- int vlan_tag_insert, unsigned int vlan_tag, int eop)
+ int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM_L4,
- eop, 1 /* SOP */, eop);
+ eop, 1 /* SOP */, eop, loopback);
}
static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
- unsigned int vlan_tag, int eop)
+ unsigned int vlan_tag, int eop, int loopback)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
mss, hdr_len, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_TSO,
- eop, 1 /* SOP */, eop);
+ eop, 1 /* SOP */, eop, loopback);
}
static inline void enic_queue_rq_desc(struct vnic_rq *rq,
@@ -131,10 +132,8 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
struct enic;
int enic_get_vnic_config(struct enic *);
-void enic_add_multicast_addr(struct enic *enic, u8 *addr);
-void enic_del_multicast_addr(struct enic *enic, u8 *addr);
-void enic_add_vlan(struct enic *enic, u16 vlanid);
-void enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_add_vlan(struct enic *enic, u16 vlanid);
+int enic_del_vlan(struct enic *enic, u16 vlanid);
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en);
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h
index a06e649..e6dd309 100644
--- a/drivers/net/enic/rq_enet_desc.h
+++ b/drivers/net/enic/rq_enet_desc.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
index 020ae6c..b86d6ef 100644
--- a/drivers/net/enic/vnic_cq.c
+++ b/drivers/net/enic/vnic_cq.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -42,7 +42,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
- printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
+ pr_err("Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
index 114763c..552d3da 100644
--- a/drivers/net/enic/vnic_cq.h
+++ b/drivers/net/enic/vnic_cq.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index e0d3328..6a5b578 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -23,21 +23,23 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
-#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+};
+
struct vnic_res {
void __iomem *vaddr;
dma_addr_t bus_addr;
unsigned int count;
};
-#define VNIC_DEV_CAP_INIT 0x0001
-
struct vnic_dev {
void *priv;
struct pci_dev *pdev;
@@ -48,13 +50,14 @@ struct vnic_dev {
struct vnic_devcmd_notify notify_copy;
dma_addr_t notify_pa;
u32 notify_sz;
- u32 *linkstatus;
dma_addr_t linkstatus_pa;
struct vnic_stats *stats;
dma_addr_t stats_pa;
struct vnic_devcmd_fw_info *fw_info;
dma_addr_t fw_info_pa;
- u32 cap_flags;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
};
#define VNIC_MAX_RES_HDR_SIZE \
@@ -78,19 +81,19 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
return -EINVAL;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
- printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
+ pr_err("vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
if (!rh) {
- printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
+ pr_err("vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
ioread32(&rh->version) != VNIC_RES_VERSION) {
- printk(KERN_ERR "vNIC BAR0 res magic/version error "
+ pr_err("vNIC BAR0 res magic/version error "
"exp (%lx/%lx) curr (%x/%x)\n",
VNIC_RES_MAGIC, VNIC_RES_VERSION,
ioread32(&rh->magic), ioread32(&rh->version));
@@ -122,7 +125,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
/* each count is stride bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar[bar_num].len) {
- printk(KERN_ERR "vNIC BAR0 resource %d "
+ pr_err("vNIC BAR0 resource %d "
"out-of-bounds, offset 0x%x + "
"size 0x%x > bar len 0x%lx\n",
type, bar_offset,
@@ -229,8 +232,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
&ring->base_addr_unaligned);
if (!ring->descs_unaligned) {
- printk(KERN_ERR
- "Failed to allocate ring (size=%d), aborting\n",
+ pr_err("Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
return -ENOMEM;
}
@@ -258,23 +260,28 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
}
}
-int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
- u64 *a0, u64 *a1, int wait)
+static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+ unsigned int i;
int delay;
u32 status;
int err;
status = ioread32(&devcmd->status);
+ if (status == 0xFFFFFFFF) {
+ /* PCI-e target device is gone */
+ return -ENODEV;
+ }
if (status & STAT_BUSY) {
- printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
+ pr_err("Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
- writeq(*a0, &devcmd->args[0]);
- writeq(*a1, &devcmd->args[1]);
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ writeq(vdev->args[i], &devcmd->args[i]);
wmb();
}
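
The new 0xFFFFFFFF checks in _vnic_dev_cmd() above cover a surprise-removed
PCIe function: reads from a device that has dropped off the bus return
all-ones, so an all-ones status word is reported as -ENODEV before any
individual status bits are trusted. A trivial illustration of that test:

	#include <stdint.h>
	#include <stdio.h>

	/* mirror of the "device gone" test, outside of any hardware access */
	static int devcmd_status_ok(uint32_t status)
	{
		if (status == 0xFFFFFFFF)
			return -1;	/* -ENODEV in the driver */
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", devcmd_status_ok(0x0),
		       devcmd_status_ok(0xFFFFFFFF));
		return 0;
	}
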
@@ -288,31 +295,110 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
udelay(100);
status = ioread32(&devcmd->status);
+ if (status == 0xFFFFFFFF) {
+ /* PCI-e target device is gone */
+ return -ENODEV;
+ }
+
if (!(status & STAT_BUSY)) {
if (status & STAT_ERROR) {
err = (int)readq(&devcmd->args[0]);
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- printk(KERN_ERR "Error %d devcmd %d\n",
+ pr_err("Error %d devcmd %d\n",
err, _CMD_N(cmd));
return err;
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
rmb();
- *a0 = readq(&devcmd->args[0]);
- *a1 = readq(&devcmd->args[1]);
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ vdev->args[i] = readq(&devcmd->args[i]);
}
return 0;
}
}
- printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
+ pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
+static int vnic_dev_cmd_proxy_by_bdf(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ u32 status;
+ int err;
+
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ vdev->args[0] = vdev->proxy_index; /* bdf */
+ vdev->args[1] = cmd;
+ vdev->args[2] = *a0;
+ vdev->args[3] = *a1;
+
+ err = _vnic_dev_cmd(vdev, CMD_PROXY_BY_BDF, wait);
+ if (err)
+ return err;
+
+ status = (u32)vdev->args[0];
+ if (status & STAT_ERROR) {
+ err = (int)vdev->args[1];
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+ return err;
+ }
+
+ *a0 = vdev->args[1];
+ *a1 = vdev->args[2];
+
+ return 0;
+}
+
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ int err;
+
+ vdev->args[0] = *a0;
+ vdev->args[1] = *a1;
+
+ err = _vnic_dev_cmd(vdev, cmd, wait);
+
+ *a0 = vdev->args[0];
+ *a1 = vdev->args[1];
+
+ return err;
+}
+
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
+{
+ vdev->proxy = PROXY_BY_BDF;
+ vdev->proxy_index = bdf;
+}
+
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
+{
+ vdev->proxy = PROXY_NONE;
+ vdev->proxy_index = 0;
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ switch (vdev->proxy) {
+ case PROXY_BY_BDF:
+ return vnic_dev_cmd_proxy_by_bdf(vdev, cmd, a0, a1, wait);
+ case PROXY_NONE:
+ default:
+ return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
+ }
+}
+
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
@@ -431,6 +517,19 @@ int vnic_dev_enable(struct vnic_dev *vdev)
return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
+int vnic_dev_enable_wait(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN)
+ return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+
+ return err;
+}
+
int vnic_dev_disable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
@@ -486,6 +585,44 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
return 0;
}
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN) {
+ err = vnic_dev_soft_reset(vdev, arg);
+ if (err)
+ return err;
+
+ return vnic_dev_init(vdev, 0);
+ }
+
+ return err;
+}
+
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
+ if (err) {
+ if (err == ERR_ECMDUNKNOWN)
+ return vnic_dev_soft_reset_done(vdev, done);
+ return err;
+ }
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
u64 a0, a1;
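
vnic_dev_hang_reset() above, like vnic_dev_enable_wait() and the
ig-vlan-rewrite call elsewhere in this file, stays usable on older firmware by
falling back when the new devcmd is reported as unknown. The shape of that
fallback, sketched standalone (the ERR_ECMDUNKNOWN value below is made up,
not the firmware's):

	#include <stdio.h>

	#define ERR_ECMDUNKNOWN	(-100)	/* illustrative value only */

	static int new_style_reset(void) { return ERR_ECMDUNKNOWN; } /* old fw */
	static int old_style_reset(void) { return 0; }
	static int reinit(void)          { return 0; }

	/* prefer the new command; emulate it on firmware that lacks it */
	static int hang_reset(void)
	{
		int err = new_style_reset();

		if (err == ERR_ECMDUNKNOWN) {
			err = old_style_reset();
			if (err)
				return err;
			return reinit();
		}
		return err;
	}

	int main(void)
	{
		printf("hang_reset -> %d\n", hang_reset());
		return 0;
	}
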
@@ -512,7 +649,7 @@ int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
return 0;
}
-void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti)
{
u64 a0, a1 = 0;
@@ -527,7 +664,29 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
- printk(KERN_ERR "Can't set packet filter\n");
+ pr_err("Can't set packet filter\n");
+
+ return err;
+}
+
+int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
+ int multicast, int broadcast, int promisc, int allmulti)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+ (multicast ? CMD_PFILTER_MULTICAST : 0) |
+ (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+ (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+ (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+ err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't set packet filter\n");
+
+ return err;
}
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
@@ -542,7 +701,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err);
+ pr_err("Can't add addr [%pM], %d\n", addr, err);
return err;
}
@@ -559,7 +718,21 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err);
+ pr_err("Can't del addr [%pM], %d\n", addr, err);
+
+ return err;
+}
+
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode)
+{
+ u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN)
+ return 0;
return err;
}
@@ -572,8 +745,7 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
if (err)
- printk(KERN_ERR "Failed to raise INTR[%d], err %d\n",
- intr, err);
+ pr_err("Failed to raise INTR[%d], err %d\n", intr, err);
return err;
}
@@ -604,8 +776,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
dma_addr_t notify_pa;
if (vdev->notify || vdev->notify_pa) {
- printk(KERN_ERR "notify block %p still allocated",
- vdev->notify);
+ pr_err("notify block %p still allocated", vdev->notify);
return -EINVAL;
}
@@ -618,22 +789,25 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
-void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
+ int err;
a0 = 0; /* paddr = 0 to unset notify buffer */
a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
a1 += sizeof(struct vnic_devcmd_notify);
- vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
vdev->notify = NULL;
vdev->notify_pa = 0;
vdev->notify_sz = 0;
+
+ return err;
}
-void vnic_dev_notify_unset(struct vnic_dev *vdev)
+int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
if (vdev->notify) {
pci_free_consistent(vdev->pdev,
@@ -642,7 +816,7 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
vdev->notify_pa);
}
- vnic_dev_notify_unsetcmd(vdev);
+ return vnic_dev_notify_unsetcmd(vdev);
}
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
@@ -672,13 +846,14 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
int wait = 1000;
int r = 0;
- if (vdev->cap_flags & VNIC_DEV_CAP_INIT)
+ if (vnic_dev_capable(vdev, CMD_INIT))
r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
else {
vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
if (a0 & CMD_INITF_DEFAULT_MAC) {
- // Emulate these for old CMD_INIT_v1 which
- // didn't pass a0 so no CMD_INITF_*.
+ /* Emulate these for old CMD_INIT_v1 which
+ * didn't pass a0 so no CMD_INITF_*.
+ */
vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
}
@@ -700,7 +875,7 @@ int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
*done = (a0 == 0);
- *err = (a0 == 0) ? a1 : 0;
+ *err = (a0 == 0) ? (int)a1:0;
return 0;
}
@@ -738,9 +913,6 @@ int vnic_dev_deinit(struct vnic_dev *vdev)
int vnic_dev_link_status(struct vnic_dev *vdev)
{
- if (vdev->linkstatus)
- return *vdev->linkstatus;
-
if (!vnic_dev_notify_ready(vdev))
return 0;
@@ -787,6 +959,14 @@ u32 vnic_dev_notify_status(struct vnic_dev *vdev)
return vdev->notify_copy.status;
}
+u32 vnic_dev_uif(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.uif;
+}
+
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode)
{
@@ -807,14 +987,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
- if (vdev->linkstatus)
- pci_free_consistent(vdev->pdev,
- sizeof(u32),
- vdev->linkstatus,
- vdev->linkstatus_pa);
if (vdev->stats)
pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_dev),
+ sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
pci_free_consistent(vdev->pdev,
@@ -844,11 +1019,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
if (!vdev->devcmd)
goto err_out;
- vdev->cap_flags = 0;
-
- if (vnic_dev_capable(vdev, CMD_INIT))
- vdev->cap_flags |= VNIC_DEV_CAP_INIT;
-
return vdev;
err_out:
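
The other structural change to vnic_dev.c is the per-device args[] array and
the CMD_PROXY_BY_BDF path: when a command is proxied, the target function's
bus/dev/fn goes in slot 0, the real command in slot 1, and its arguments
shift up by one, with the status and results coming back shifted the same way.
A toy model of that marshalling (fw_exec() here is a stand-in that just echoes
values back, not the devcmd register interface):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define NARGS		16	/* illustrative, like VNIC_DEVCMD_NARGS */
	#define STAT_ERROR	0x2	/* illustrative error bit */

	/* stand-in for the firmware side of the exchange */
	static int fw_exec(uint64_t args[NARGS])
	{
		uint64_t bdf = args[0], cmd = args[1], a0 = args[2];

		args[0] = 0;		/* status of the proxied command */
		args[1] = a0 + cmd;	/* proxied result a0 */
		args[2] = bdf;		/* proxied result a1 */
		return 0;
	}

	/* same marshalling shape as vnic_dev_cmd_proxy_by_bdf() */
	static int cmd_proxy(uint16_t bdf, uint64_t cmd, uint64_t *a0, uint64_t *a1)
	{
		uint64_t args[NARGS];
		int err;

		memset(args, 0, sizeof(args));
		args[0] = bdf;
		args[1] = cmd;
		args[2] = *a0;
		args[3] = *a1;

		err = fw_exec(args);
		if (err)
			return err;
		if ((uint32_t)args[0] & STAT_ERROR)
			return (int)args[1];

		*a0 = args[1];
		*a1 = args[2];
		return 0;
	}

	int main(void)
	{
		uint64_t a0 = 5, a1 = 0;

		cmd_proxy(0x0102, 7, &a0, &a1);
		printf("a0=%llu a1=%llu\n",
		       (unsigned long long)a0, (unsigned long long)a1);
		return 0;
	}
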
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index caccce3..3a61873 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -41,6 +41,9 @@ static inline void writeq(u64 val, void __iomem *reg)
}
#endif
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
enum vnic_dev_hw_version {
VNIC_DEV_HW_VER_UNKNOWN,
VNIC_DEV_HW_VER_A1,
@@ -92,6 +95,8 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring);
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_hw_version(struct vnic_dev *vdev,
@@ -101,8 +106,10 @@ int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
int vnic_dev_stats_clear(struct vnic_dev *vdev);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
-void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
+int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
+ int multicast, int broadcast, int promisc, int allmulti);
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
@@ -110,16 +117,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
void *notify_addr, dma_addr_t notify_pa, u16 intr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
-void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
-void vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
+int vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
u32 vnic_dev_mtu(struct vnic_dev *vdev);
u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
u32 vnic_dev_notify_status(struct vnic_dev *vdev);
+u32 vnic_dev_uif(struct vnic_dev *vdev);
int vnic_dev_close(struct vnic_dev *vdev);
int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_enable_wait(struct vnic_dev *vdev);
int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
@@ -129,10 +138,14 @@ int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
int vnic_dev_deinit(struct vnic_dev *vdev);
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
void vnic_dev_unregister(struct vnic_dev *vdev);
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index d78bbcc..2066175 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -98,6 +98,9 @@ enum vnic_devcmd_cmd {
/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+ /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
+
/* hang detection notification */
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
@@ -171,6 +174,9 @@ enum vnic_devcmd_cmd {
/* enable virtual link */
CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+ /* enable virtual link, waiting variant. */
+ CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
/* disable virtual link */
CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
@@ -211,6 +217,27 @@ enum vnic_devcmd_cmd {
* in: (u16)a0=interrupt number to assert
*/
CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+ /* initiate hangreset, like softreset after hang detected */
+ CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+ /* hangreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+ /*
+ * Set hw ingress packet vlan rewrite mode:
+ * in: (u32)a0=new vlan rewrite mode
+ * out: (u32)a0=old vlan rewrite mode */
+ CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+ /*
+ * in: (u16)a0=bdf of target vnic
+ * (u32)a1=cmd to proxy
+ * a2-a15=args to cmd in a1
+ * out: (u32)a0=status of proxied cmd
+ * a1-a15=out args of proxied cmd */
+ CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
};
/* flags for CMD_OPEN */
@@ -226,6 +253,12 @@ enum vnic_devcmd_cmd {
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
+/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
+#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
+#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
+#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2
+#define IG_VLAN_REWRITE_MODE_PASS_THRU 3
+
enum vnic_devcmd_status {
STAT_NONE = 0,
STAT_BUSY = 1 << 0, /* cmd in progress */
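The CMD_IG_VLAN_REWRITE_MODE devcmd and the IG_VLAN_REWRITE_MODE_* constants above pair with the vnic_dev_set_ig_vlan_rewrite_mode() declaration added to vnic_dev.h earlier in this diff. A hypothetical call site, sketched only for illustration (the enic pointer, chosen mode and error message are assumptions, not taken from the patch):

	int err;

	/* ask firmware to priority-tag ingress frames on the default VLAN;
	 * enic->vdev is assumed to be an already-registered struct vnic_dev */
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
		IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
	if (err)
		dev_err(&enic->pdev->dev,
			"Failed to set ingress vlan rewrite mode, err %d\n", err);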
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 8eeb675..3b32912 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -35,6 +35,7 @@ struct vnic_enet_config {
u8 intr_mode;
char devname[16];
u32 intr_timer_usec;
+ u16 loop_tag;
};
#define VENETF_TSO 0x1 /* TSO enabled */
@@ -48,5 +49,6 @@ struct vnic_enet_config {
#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
+#define VENETF_LOOP 0x800 /* Loopback enabled */
#endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 3934309..52ab61a 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -39,8 +39,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
- printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
- index);
+ pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
return -EINVAL;
}
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 2fe6c63..09dc0b7 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -61,7 +61,11 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
iowrite32(1, &intr->ctrl->mask);
- (void)ioread32(&intr->ctrl->mask);
+}
+
+static inline int vnic_intr_masked(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->mask);
}
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index cf80ab4..995a50d 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
index b61c22a..810287b 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/enic/vnic_resource.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index cc580cf..dbb2aca 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -37,23 +37,23 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
vdev = rq->vdev;
for (i = 0; i < blks; i++) {
- rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!rq->bufs[i]) {
- printk(KERN_ERR "Failed to alloc rq_bufs\n");
+ pr_err("Failed to alloc rq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = rq->bufs[i];
- for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
- buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+ for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
+ buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
buf->desc = (u8 *)rq->ring.descs +
rq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = rq->bufs[0];
break;
- } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+ } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
buf->next = rq->bufs[i + 1];
} else {
buf->next = buf + 1;
@@ -94,7 +94,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
- printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
+ pr_err("Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
@@ -119,10 +119,11 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_offset)
{
u64 paddr;
+ unsigned int count = rq->ring.desc_count;
paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &rq->ctrl->ring_base);
- iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+ iowrite32(count, &rq->ctrl->ring_size);
iowrite32(cq_index, &rq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
@@ -132,8 +133,8 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
iowrite32(posted_index, &rq->ctrl->posted_index);
rq->to_use = rq->to_clean =
- &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
- [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -145,6 +146,11 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_index = 0;
+ }
+
vnic_rq_init_start(rq, cq_index,
fetch_index, fetch_index,
error_interrupt_enable,
@@ -174,7 +180,7 @@ int vnic_rq_disable(struct vnic_rq *rq)
udelay(10);
}
- printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
+ pr_err("Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
@@ -184,8 +190,7 @@ void vnic_rq_clean(struct vnic_rq *rq,
{
struct vnic_rq_buf *buf;
u32 fetch_index;
-
- BUG_ON(ioread32(&rq->ctrl->enable));
+ unsigned int count = rq->ring.desc_count;
buf = rq->to_clean;
@@ -199,9 +204,14 @@ void vnic_rq_clean(struct vnic_rq *rq,
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_index = 0;
+ }
rq->to_use = rq->to_clean =
- &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
- [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
iowrite32(fetch_index, &rq->ctrl->posted_index);
vnic_dev_clear_desc_ring(&rq->ring);
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 35e736c..2dc48f9 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008, 2009 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -52,12 +52,16 @@ struct vnic_rq_ctrl {
u32 pad10;
};
-/* Break the vnic_rq_buf allocations into blocks of 64 entries */
-#define VNIC_RQ_BUF_BLK_ENTRIES 64
-#define VNIC_RQ_BUF_BLK_SZ \
- (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
+#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
+ ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
+ VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_RQ_BUF_BLK_SZ(entries) \
+ (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
- DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+ DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
struct vnic_rq_buf {
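A quick way to see what the new count-aware macros above change is to evaluate them for a small and a large ring. The stand-alone program below is only an illustration; it re-declares the two entry-count macros so it builds outside the kernel:

#include <stdio.h>

#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))

int main(void)
{
	/* small rings now use 32-entry buf blocks, larger rings keep 64 */
	printf("32-entry ring  -> %u entries per block\n",
	       VNIC_RQ_BUF_BLK_ENTRIES(32));
	printf("512-entry ring -> %u entries per block\n",
	       VNIC_RQ_BUF_BLK_ENTRIES(512));
	return 0;
}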
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
index 5fbb3c9..f62d187 100644
--- a/drivers/net/enic/vnic_rss.h
+++ b/drivers/net/enic/vnic_rss.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h
index 9ff9614..77750ec 100644
--- a/drivers/net/enic/vnic_stats.h
+++ b/drivers/net/enic/vnic_stats.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index 0a35085..197c9d2 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -27,6 +27,9 @@ struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
{
struct vic_provinfo *vp;
+ if (!oui)
+ return NULL;
+
vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
if (!vp)
return NULL;
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index 1378afb..122e33b 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -37,23 +37,23 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
vdev = wq->vdev;
for (i = 0; i < blks; i++) {
- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!wq->bufs[i]) {
- printk(KERN_ERR "Failed to alloc wq_bufs\n");
+ pr_err("Failed to alloc wq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = wq->bufs[i];
- for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
- buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+ for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
+ buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
buf->desc = (u8 *)wq->ring.descs +
wq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = wq->bufs[0];
break;
- } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+ } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
buf->next = wq->bufs[i + 1];
} else {
buf->next = buf + 1;
@@ -94,7 +94,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
- printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
+ pr_err("Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
@@ -119,10 +119,11 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_offset)
{
u64 paddr;
+ unsigned int count = wq->ring.desc_count;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
- iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+ iowrite32(count, &wq->ctrl->ring_size);
iowrite32(fetch_index, &wq->ctrl->fetch_index);
iowrite32(posted_index, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
@@ -131,8 +132,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
iowrite32(0, &wq->ctrl->error_status);
wq->to_use = wq->to_clean =
- &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
- [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES];
+ &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
}
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
@@ -167,7 +168,7 @@ int vnic_wq_disable(struct vnic_wq *wq)
udelay(10);
}
- printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
+ pr_err("Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
@@ -177,8 +178,6 @@ void vnic_wq_clean(struct vnic_wq *wq,
{
struct vnic_wq_buf *buf;
- BUG_ON(ioread32(&wq->ctrl->enable));
-
buf = wq->to_clean;
while (vnic_wq_desc_used(wq) > 0) {
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
index 9c34d41..94ac462 100644
--- a/drivers/net/enic/vnic_wq.h
+++ b/drivers/net/enic/vnic_wq.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -60,12 +60,16 @@ struct vnic_wq_buf {
void *desc;
};
-/* Break the vnic_wq_buf allocations into blocks of 64 entries */
-#define VNIC_WQ_BUF_BLK_ENTRIES 64
-#define VNIC_WQ_BUF_BLK_SZ \
- (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
+#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
+ ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
+ VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_WQ_BUF_BLK_SZ(entries) \
+ (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
- DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+ DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
struct vnic_wq {
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h
index 483596c..c7021e3 100644
--- a/drivers/net/enic/wq_enet_desc.h
+++ b/drivers/net/enic/wq_enet_desc.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index a3cae4e..b4afd7a 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1360,11 +1360,10 @@ fec_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-
static int
-fec_suspend(struct platform_device *dev, pm_message_t state)
+fec_suspend(struct device *dev)
{
- struct net_device *ndev = platform_get_drvdata(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep;
if (ndev) {
@@ -1377,9 +1376,9 @@ fec_suspend(struct platform_device *dev, pm_message_t state)
}
static int
-fec_resume(struct platform_device *dev)
+fec_resume(struct device *dev)
{
- struct net_device *ndev = platform_get_drvdata(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep;
if (ndev) {
@@ -1399,23 +1398,18 @@ static const struct dev_pm_ops fec_pm_ops = {
.poweroff = fec_suspend,
.restore = fec_resume,
};
-
-#define FEC_PM_OPS (&fec_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define FEC_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
+#endif
static struct platform_driver fec_driver = {
.driver = {
- .name = "fec",
- .owner = THIS_MODULE,
- .pm = FEC_PM_OPS,
+ .name = "fec",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &fec_pm_ops,
+#endif
},
- .probe = fec_probe,
- .remove = __devexit_p(fec_drv_remove),
+ .probe = fec_probe,
+ .remove = __devexit_p(fec_drv_remove),
};
static int __init
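The fec.c hunks above convert the driver from platform_driver suspend/resume callbacks to a struct dev_pm_ops attached through the driver's .pm field. A generic sketch of that pattern follows; the foo_* names are made up and the callback bodies are placeholders, not the fec implementation:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	/* quiesce the interface, gate clocks, etc. */
	if (ndev && netif_running(ndev))
		netif_device_detach(ndev);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (ndev && netif_running(ndev))
		netif_device_attach(ndev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};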
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 8a17bf0..fccb7a3 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -85,6 +85,7 @@
#include <linux/net_tstamp.h>
#include <asm/io.h>
+#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
@@ -381,10 +382,14 @@ static void gfar_init_mac(struct net_device *ndev)
/* Insert receive time stamps into padding alignment bytes */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
rctrl &= ~RCTRL_PAL_MASK;
- rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
+ rctrl |= RCTRL_PADDING(8);
priv->padding = 8;
}
+ /* Enable HW time stamping if requested from user space */
+ if (priv->hwts_rx_en)
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+
/* keep vlan related bits if it's enabled */
if (priv->vlgrp) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -747,7 +752,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -805,12 +811,20 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->hwts_rx_en = 0;
+ if (priv->hwts_rx_en) {
+ stop_gfar(netdev);
+ priv->hwts_rx_en = 0;
+ startup_gfar(netdev);
+ }
break;
default:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
- priv->hwts_rx_en = 1;
+ if (!priv->hwts_rx_en) {
+ stop_gfar(netdev);
+ priv->hwts_rx_en = 1;
+ startup_gfar(netdev);
+ }
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
@@ -915,6 +929,34 @@ static void gfar_init_filer_table(struct gfar_private *priv)
}
}
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+ struct device *dev = &priv->ofdev->dev;
+ unsigned int pvr = mfspr(SPRN_PVR);
+ unsigned int svr = mfspr(SPRN_SVR);
+ unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
+ unsigned int rev = svr & 0xffff;
+
+ /* MPC8313 Rev 2.0 and higher; All MPC837x */
+ if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_74;
+
+ /* MPC8313 and MPC837x all rev */
+ if ((pvr == 0x80850010 && mod == 0x80b0) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_76;
+
+ /* MPC8313 and MPC837x all rev */
+ if ((pvr == 0x80850010 && mod == 0x80b0) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_A002;
+
+ if (priv->errata)
+ dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
+ priv->errata);
+}
+
/* Set up the ethernet device structure, private data,
* and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
@@ -947,6 +989,8 @@ static int gfar_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, priv);
regs = priv->gfargrp[0].regs;
+ gfar_detect_errata(priv);
+
/* Stop the DMA engine now, in case it was running before */
/* (The firmware could have used it, and left it running). */
gfar_halt(dev);
@@ -961,7 +1005,10 @@ static int gfar_probe(struct of_device *ofdev,
gfar_write(&regs->maccfg1, tempval);
/* Initialize MACCFG2. */
- gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
+ tempval = MACCFG2_INIT_SETTINGS;
+ if (gfar_has_errata(priv, GFAR_ERRATA_74))
+ tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+ gfar_write(&regs->maccfg2, tempval);
/* Initialize ECNTRL */
gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
@@ -1528,6 +1575,29 @@ static void init_registers(struct net_device *dev)
gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
+static int __gfar_is_rx_idle(struct gfar_private *priv)
+{
+ u32 res;
+
+ /*
+	 * Normally TSEC should not hang on GRS commands, so we should
+ * actually wait for IEVENT_GRSC flag.
+ */
+ if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+ return 0;
+
+ /*
+ * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+ * the same as bits 23-30, the eTSEC Rx is assumed to be idle
+ * and the Rx can be safely reset.
+ */
+ res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
+ res &= 0x7f807f80;
+ if ((res & 0xffff) == (res >> 16))
+ return 1;
+
+ return 0;
+}
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
@@ -1551,12 +1621,18 @@ static void gfar_halt_nodisable(struct net_device *dev)
tempval = gfar_read(&regs->dmactrl);
if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
!= (DMACTRL_GRS | DMACTRL_GTS)) {
+ int ret;
+
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(&regs->dmactrl, tempval);
- spin_event_timeout(((gfar_read(&regs->ievent) &
- (IEVENT_GRSC | IEVENT_GTSC)) ==
- (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
+ do {
+ ret = spin_event_timeout(((gfar_read(&regs->ievent) &
+ (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
+ if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
+ ret = __gfar_is_rx_idle(priv);
+ } while (!ret);
}
}
@@ -1974,6 +2050,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nr_frags, nr_txbds, length;
union skb_shared_tx *shtx;
+ /*
+ * TOE=1 frames larger than 2500 bytes may see excess delays
+ * before start of transmission.
+ */
+ if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
+ skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->len > 2500)) {
+ int ret;
+
+ ret = skb_checksum_help(skb);
+ if (ret)
+ return ret;
+ }
+
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
txq = netdev_get_tx_queue(dev, rq);
@@ -2287,7 +2377,8 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
* to allow huge frames, and to check the length */
tempval = gfar_read(&regs->maccfg2);
- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+ gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
else
tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2642,6 +2733,10 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
priv->rx_buffer_size, DMA_FROM_DEVICE);
+ if (unlikely(!(bdp->status & RXBD_ERR) &&
+ bdp->length > priv->rx_buffer_size))
+ bdp->status = RXBD_LARGE;
+
/* We drop the frame if we failed to allocate a new buffer */
if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
bdp->status & RXBD_ERR)) {
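The gfar_detect_errata() helper added above keys its workarounds off the PVR and SVR registers; the stand-alone example below only illustrates the bit slicing (the SVR value is hypothetical, not taken from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned int svr = 0x80b00021;            /* made-up MPC8313E-style SVR */
	unsigned int mod = (svr >> 16) & 0xfff6;  /* part id without the "E" bit */
	unsigned int rev = svr & 0xffff;          /* silicon revision */

	printf("mod=0x%04x rev=0x%04x\n", mod, rev); /* mod=0x80b0 rev=0x0021 */
	return 0;
}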
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index ac4a92e..710810e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1025,6 +1025,12 @@ struct gfar_priv_grp {
char int_name_er[GFAR_INT_NAME_MAX];
};
+enum gfar_errata {
+ GFAR_ERRATA_74 = 0x01,
+ GFAR_ERRATA_76 = 0x02,
+ GFAR_ERRATA_A002 = 0x04,
+};
+
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
* (Ok, that's not so true anymore, but there is a family resemblance)
* The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -1049,6 +1055,7 @@ struct gfar_private {
struct device_node *node;
struct net_device *ndev;
struct of_device *ofdev;
+ enum gfar_errata errata;
struct gfar_priv_grp gfargrp[MAXGROUPS];
struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
@@ -1111,6 +1118,12 @@ struct gfar_private {
extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+static inline int gfar_has_errata(struct gfar_private *priv,
+ enum gfar_errata err)
+{
+ return priv->errata & err;
+}
+
static inline u32 gfar_read(volatile unsigned __iomem *addr)
{
u32 val;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 86438b5..06251a9 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -295,6 +295,10 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
if (hw->bus.func == E1000_FUNC_1)
mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
return igb_acquire_swfw_sync_82575(hw, mask);
}
@@ -312,6 +316,10 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
if (hw->bus.func == E1000_FUNC_1)
mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
igb_release_swfw_sync_82575(hw, mask);
}
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 24d9be6..90bc29d 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -164,6 +164,8 @@
#define E1000_SWFW_EEP_SM 0x1
#define E1000_SWFW_PHY0_SM 0x2
#define E1000_SWFW_PHY1_SM 0x4
+#define E1000_SWFW_PHY2_SM 0x20
+#define E1000_SWFW_PHY3_SM 0x40
/* FACTPS Definitions */
/* Device Control */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index f2ebf927..26bf6a1 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1823,12 +1823,10 @@ static void igb_diag_test(struct net_device *netdev,
dev_info(&adapter->pdev->dev, "online testing starting\n");
/* PHY is powered down when interface is down */
- if (!netif_carrier_ok(netdev)) {
+ if (if_running && igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ else
data[4] = 0;
- } else {
- if (igb_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
- }
/* Online tests aren't run; pass by default */
data[0] = 0;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 3881918..9cb04e2 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -630,9 +630,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
for (; i < adapter->rss_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
- for (; j < adapter->rss_queues; j++)
- adapter->tx_ring[j]->reg_idx = rbase_offset +
- Q_IDX_82576(j);
}
case e1000_82575:
case e1000_82580:
@@ -996,7 +993,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
- adapter->num_tx_queues = adapter->rss_queues;
+ if (adapter->vfs_allocated_count)
+ adapter->num_tx_queues = 1;
+ else
+ adapter->num_tx_queues = adapter->rss_queues;
/* start with one vector for every rx queue */
numvecs = adapter->num_rx_queues;
@@ -2091,9 +2091,6 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
- if (adapter->vfs_allocated_count > 7)
- adapter->vfs_allocated_count = 7;
-
if (adapter->vfs_allocated_count) {
adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
sizeof(struct vf_data_storage),
@@ -2258,7 +2255,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
#ifdef CONFIG_PCI_IOV
if (hw->mac.type == e1000_82576)
- adapter->vfs_allocated_count = max_vfs;
+ adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
#endif /* CONFIG_PCI_IOV */
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
@@ -4977,6 +4974,10 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
+ /*
+ * The VF MAC Address is stored in a packed array of bytes
+ * starting at the second 32 bit word of the msg array
+ */
unsigned char *addr = (char *)&msg[1];
int err = -1;
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 9270089..9e15eb9 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -110,7 +110,6 @@ struct vf_data_storage {
u16 vlans_enabled;
bool clear_to_send;
bool pf_set_mac;
- int rar;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 976fd9e..0ee175a 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -206,6 +206,14 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
s32 status = 0;
u32 autoc = 0;
+ /* Determine 1G link capabilities off of SFP+ type */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ goto out;
+ }
+
/*
* Determine link capabilities based on the stored value of AUTOC,
* which represents EEPROM defaults. If AUTOC value has not been
@@ -2087,6 +2095,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
u16 ext_ability = 0;
u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
hw->phy.ops.identify(hw);
@@ -2167,11 +2176,15 @@ sfp_check:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
default:
break;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 873b45e..5275e9c 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -234,6 +234,13 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_not_present:
ecmd->port = PORT_NONE;
break;
+ case ixgbe_sfp_type_1g_cu_core0:
+ case ixgbe_sfp_type_1g_cu_core1:
+ ecmd->port = PORT_TP;
+ ecmd->supported = SUPPORTED_TP;
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ break;
case ixgbe_sfp_type_unknown:
default:
ecmd->port = PORT_OTHER;
@@ -2205,8 +2212,11 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool need_reset = false;
+ int rc;
- ethtool_op_set_flags(netdev, data);
+ rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
+ if (rc)
+ return rc;
/* if state changes we need to update adapter->flags and reset */
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
@@ -2227,7 +2237,7 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
break;
}
} else if (!adapter->rx_itr_setting) {
- netdev->features &= ~ETH_FLAG_LRO;
+ netdev->features &= ~NETIF_F_LRO;
if (data & ETH_FLAG_LRO)
e_info("rx-usecs set to 0, "
"LRO/RSC cannot be enabled.\n");
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 9cca737..ebc4b04 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2992,6 +2992,48 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int vfn = adapter->num_vfs;
+ unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+ /* return error if we do not support writing to RAR table */
+ if (!hw->mac.ops.set_rar)
+ return -ENOMEM;
+
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
+ vfn, IXGBE_RAH_AV);
+ count++;
+ }
+ }
+ /* write the addresses in reverse order to avoid write combining */
+ for (; rar_entries > 0 ; rar_entries--)
+ hw->mac.ops.clear_rar(hw, rar_entries);
+
+ return count;
+}
+
+/**
* ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -3004,38 +3046,58 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 fctrl;
+ u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ int count;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
/* don't hardware filter vlans in promisc mode */
ixgbe_vlan_filter_disable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
- fctrl &= ~IXGBE_FCTRL_UPE;
- } else if (!hw->addr_ctrl.uc_set_promisc) {
- fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ /*
+ * Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ vmolr |= IXGBE_VMOLR_ROMPE;
}
ixgbe_vlan_filter_enable(adapter);
hw->addr_ctrl.user_set_promisc = false;
+ /*
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+ */
+ count = ixgbe_write_uc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_UPE;
+ vmolr |= IXGBE_VMOLR_ROPE;
+ }
}
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
- /* reprogram secondary unicast list */
- hw->mac.ops.update_uc_addr_list(hw, netdev);
-
- /* reprogram multicast list */
- hw->mac.ops.update_mc_addr_list(hw, netdev);
-
- if (adapter->num_vfs)
+ if (adapter->num_vfs) {
ixgbe_restore_vf_multicasts(adapter);
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+ ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 09e1911..6c0d42e 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -575,6 +575,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 4 SFP_DA_CORE1 - 82599-specific
* 5 SFP_SR/LR_CORE0 - 82599-specific
* 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
+ * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -623,6 +627,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core1;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core1;
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -694,8 +705,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto out;
}
- /* 1G SFP modules are not supported */
- if (comp_codes_10g == 0) {
+ /* Verify supported 1G SFP modules */
+ if (comp_codes_10g == 0 &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -709,7 +722,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* This is guaranteed to be 82599, no need to check for NULL */
hw->mac.ops.get_device_caps(hw, &enforce_sfp);
- if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+ !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = 0;
@@ -740,6 +755,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *data_offset)
{
u16 sfp_id;
+ u16 sfp_type = hw->phy.sfp_type;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -751,6 +767,17 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
(hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ /*
+ * Limiting active cables and 1G Phys must be initialized as
+ * SR modules
+ */
+ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core0)
+ sfp_type = ixgbe_sfp_type_srlr_core0;
+ else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core1)
+ sfp_type = ixgbe_sfp_type_srlr_core1;
+
/* Read offset to PHY init contents */
hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
@@ -767,7 +794,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
while (sfp_id != IXGBE_PHY_INIT_END_NL) {
- if (sfp_id == hw->phy.sfp_type) {
+ if (sfp_id == sfp_type) {
(*list_offset)++;
hw->eeprom.ops.read(hw, *list_offset, data_offset);
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index ef4ba83..fb3898f 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -48,6 +48,7 @@
#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 66f6e62..6e6dee0 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -137,6 +137,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
/* reset offloads to defaults */
if (adapter->vfinfo[vf].pf_vlan) {
@@ -158,26 +159,17 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
- if (adapter->vfinfo[vf].rar > 0) {
- adapter->hw.mac.ops.clear_rar(&adapter->hw,
- adapter->vfinfo[vf].rar);
- adapter->vfinfo[vf].rar = -1;
- }
+ hw->mac.ops.clear_rar(hw, rar_entry);
}
int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
struct ixgbe_hw *hw = &adapter->hw;
-
- adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
- vf, IXGBE_RAH_AV);
- if (adapter->vfinfo[vf].rar < 0) {
- e_err("Could not set MAC Filter for VF %d\n", vf);
- return -1;
- }
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+ hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
return 0;
}
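The ixgbe SR-IOV hunks above drop the per-VF rar bookkeeping in favour of a fixed layout in which VF n always owns RAR entry num_rar_entries - (n + 1), while the PF's unicast list (see ixgbe_write_uc_addr_list() earlier) fills downward below the VF block. The arithmetic below is purely illustrative; 128 entries and 4 VFs are example values:

#include <stdio.h>

int main(void)
{
	unsigned int num_rar_entries = 128;	/* example table size */
	unsigned int vf;

	for (vf = 0; vf < 4; vf++)
		printf("VF %u uses RAR entry %u\n",
		       vf, num_rar_entries - (vf + 1));
	return 0;
}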
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index cdd1998..9587d97 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2214,6 +2214,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_srlr_core1 = 6,
ixgbe_sfp_type_da_act_lmt_core0 = 7,
ixgbe_sfp_type_da_act_lmt_core1 = 8,
+ ixgbe_sfp_type_1g_cu_core0 = 9,
+ ixgbe_sfp_type_1g_cu_core1 = 10,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 7805bbf..62362b4 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -5718,7 +5718,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
* from the bridge.
*/
if ((hw->features & STP_SUPPORT) && !promiscuous &&
- dev->br_port) {
+ (dev->priv_flags & IFF_BRIDGE_PORT)) {
struct ksz_switch *sw = hw->ksz_switch;
int port = priv->port.first_port;
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index ce5d6e9..c27f429 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1343,7 +1343,7 @@ static void set_multicast_list(struct net_device *dev)
DEB(DEB_MULTI,
printk(KERN_DEBUG
"%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
- dev->name, dev->mc_count,
+ dev->name, netdev_mc_count(dev),
dev->flags & IFF_PROMISC ? "ON" : "OFF",
dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 52dcc84..7b12d0e 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -202,14 +202,29 @@ static int temac_dma_bd_init(struct net_device *ndev)
int i;
lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+ if (!lp->rx_skb) {
+ dev_err(&ndev->dev,
+ "can't allocate memory for DMA RX buffer\n");
+ goto out;
+ }
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL);
+ if (!lp->tx_bd_v) {
+ dev_err(&ndev->dev,
+ "unable to allocate DMA TX buffer descriptors");
+ goto out;
+ }
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p, GFP_KERNEL);
+ if (!lp->rx_bd_v) {
+ dev_err(&ndev->dev,
+ "unable to allocate DMA RX buffer descriptors");
+ goto out;
+ }
memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) {
@@ -227,7 +242,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
if (skb == 0) {
dev_err(&ndev->dev, "alloc_skb error %d\n", i);
- return -1;
+ goto out;
}
lp->rx_skb[i] = skb;
/* returns physical address of skb->data */
@@ -258,6 +273,9 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
return 0;
+
+out:
+ return -ENOMEM;
}
/* ---------------------------------------------------------------------
@@ -505,7 +523,10 @@ static void temac_device_reset(struct net_device *ndev)
}
lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
- temac_dma_bd_init(ndev);
+ if (temac_dma_bd_init(ndev)) {
+ dev_err(&ndev->dev,
+ "temac_device_reset descriptor allocation failed\n");
+ }
temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 09334f8..4dd0510 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -58,53 +58,15 @@
#include <linux/tcp.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
+#include <linux/u64_stats_sync.h>
struct pcpu_lstats {
- u64 packets;
- u64 bytes;
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- seqcount_t seq;
-#endif
- unsigned long drops;
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+ unsigned long drops;
};
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
- write_seqcount_begin(&lstats->seq);
-}
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
- write_seqcount_end(&lstats->seq);
-}
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
- u64 tpackets, tbytes;
- unsigned int seq;
-
- do {
- seq = read_seqcount_begin(&lstats->seq);
- tpackets = lstats->packets;
- tbytes = lstats->bytes;
- } while (read_seqcount_retry(&lstats->seq, seq));
-
- *packets += tpackets;
- *bytes += tbytes;
-}
-#else
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-}
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-}
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
- *packets += lstats->packets;
- *bytes += lstats->bytes;
-}
-#endif
-
/*
* The higher levels take care of making this non-reentrant (it's
* called with bh's disabled).
@@ -126,10 +88,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
len = skb->len;
if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
- lstats_update_begin(lb_stats);
+ u64_stats_update_begin(&lb_stats->syncp);
lb_stats->bytes += len;
lb_stats->packets++;
- lstats_update_end(lb_stats);
+ u64_stats_update_end(&lb_stats->syncp);
} else
lb_stats->drops++;
@@ -148,10 +110,18 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
pcpu_lstats = (void __percpu __force *)dev->ml_priv;
for_each_possible_cpu(i) {
const struct pcpu_lstats *lb_stats;
+ u64 tbytes, tpackets;
+ unsigned int start;
lb_stats = per_cpu_ptr(pcpu_lstats, i);
- lstats_fetch_and_add(&packets, &bytes, lb_stats);
+ do {
+ start = u64_stats_fetch_begin(&lb_stats->syncp);
+ tbytes = lb_stats->bytes;
+ tpackets = lb_stats->packets;
+ } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
drops += lb_stats->drops;
+ bytes += tbytes;
+ packets += tpackets;
}
stats->rx_packets = packets;
stats->tx_packets = packets;
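The loopback.c conversion above replaces the open-coded 32-bit seqcount with the u64_stats_sync helpers. A condensed sketch of the writer/reader pairing; the pcpu_counters struct and function names are stand-ins, not kernel code:

#include <linux/u64_stats_sync.h>

struct pcpu_counters {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* writer side (per-cpu, bh context) */
static void counters_add(struct pcpu_counters *c, unsigned int len)
{
	u64_stats_update_begin(&c->syncp);
	c->bytes += len;
	c->packets++;
	u64_stats_update_end(&c->syncp);
}

/* reader side: retry until a consistent snapshot is seen (only matters
 * on 32-bit SMP, where the helpers expand to a real seqcount) */
static void counters_read(const struct pcpu_counters *c, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&c->syncp);
		*packets = c->packets;
		*bytes = c->bytes;
	} while (u64_stats_fetch_retry(&c->syncp, start));
}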
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 59c3155..e6d626e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -40,6 +40,11 @@ struct macvlan_port {
struct rcu_head rcu;
};
+#define macvlan_port_get_rcu(dev) \
+ ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
+#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
+#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
+
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
@@ -155,7 +160,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
struct net_device *dev;
unsigned int len;
- port = rcu_dereference(skb->dev->macvlan_port);
+ port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
@@ -426,29 +431,38 @@ static void macvlan_uninit(struct net_device *dev)
free_percpu(vlan->rx_stats);
}
-static struct net_device_stats *macvlan_dev_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev)
{
- struct net_device_stats *stats = &dev->stats;
+ struct rtnl_link_stats64 *stats = &dev->stats64;
struct macvlan_dev *vlan = netdev_priv(dev);
- dev_txq_stats_fold(dev, stats);
+ dev_txq_stats_fold(dev, &dev->stats);
if (vlan->rx_stats) {
- struct macvlan_rx_stats *p, rx = {0};
+ struct macvlan_rx_stats *p, accum = {0};
+ u64 rx_packets, rx_bytes, rx_multicast;
+ unsigned int start;
int i;
for_each_possible_cpu(i) {
p = per_cpu_ptr(vlan->rx_stats, i);
- rx.rx_packets += p->rx_packets;
- rx.rx_bytes += p->rx_bytes;
- rx.rx_errors += p->rx_errors;
- rx.multicast += p->multicast;
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ rx_multicast = p->rx_multicast;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ accum.rx_packets += rx_packets;
+ accum.rx_bytes += rx_bytes;
+ accum.rx_multicast += rx_multicast;
+ /* rx_errors is an ulong, updated without syncp protection */
+ accum.rx_errors += p->rx_errors;
}
- stats->rx_packets = rx.rx_packets;
- stats->rx_bytes = rx.rx_bytes;
- stats->rx_errors = rx.rx_errors;
- stats->rx_dropped = rx.rx_errors;
- stats->multicast = rx.multicast;
+ stats->rx_packets = accum.rx_packets;
+ stats->rx_bytes = accum.rx_bytes;
+ stats->rx_errors = accum.rx_errors;
+ stats->rx_dropped = accum.rx_errors;
+ stats->multicast = accum.rx_multicast;
}
return stats;
}
@@ -497,7 +511,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_change_rx_flags = macvlan_change_rx_flags,
.ndo_set_mac_address = macvlan_set_mac_address,
.ndo_set_multicast_list = macvlan_set_multicast_list,
- .ndo_get_stats = macvlan_dev_get_stats,
+ .ndo_get_stats64 = macvlan_dev_get_stats64,
.ndo_validate_addr = eth_validate_addr,
};
@@ -530,14 +544,12 @@ static int macvlan_port_create(struct net_device *dev)
INIT_LIST_HEAD(&port->vlans);
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
INIT_HLIST_HEAD(&port->vlan_hash[i]);
- rcu_assign_pointer(dev->macvlan_port, port);
- err = netdev_rx_handler_register(dev, macvlan_handle_frame);
- if (err) {
- rcu_assign_pointer(dev->macvlan_port, NULL);
+ err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
+ if (err)
kfree(port);
- }
+ dev->priv_flags |= IFF_MACVLAN_PORT;
return err;
}
@@ -551,10 +563,10 @@ static void macvlan_port_rcu_free(struct rcu_head *head)
static void macvlan_port_destroy(struct net_device *dev)
{
- struct macvlan_port *port = dev->macvlan_port;
+ struct macvlan_port *port = macvlan_port_get(dev);
+ dev->priv_flags &= ~IFF_MACVLAN_PORT;
netdev_rx_handler_unregister(dev);
- rcu_assign_pointer(dev->macvlan_port, NULL);
call_rcu(&port->rcu, macvlan_port_rcu_free);
}
@@ -633,12 +645,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_ADDRESS])
random_ether_addr(dev->dev_addr);
- if (lowerdev->macvlan_port == NULL) {
+ if (!macvlan_port_exists(lowerdev)) {
err = macvlan_port_create(lowerdev);
if (err < 0)
return err;
}
- port = lowerdev->macvlan_port;
+ port = macvlan_port_get(lowerdev);
vlan->lowerdev = lowerdev;
vlan->dev = dev;
@@ -748,10 +760,11 @@ static int macvlan_device_event(struct notifier_block *unused,
struct macvlan_dev *vlan, *next;
struct macvlan_port *port;
- port = dev->macvlan_port;
- if (port == NULL)
+ if (!macvlan_port_exists(dev))
return NOTIFY_DONE;
+ port = macvlan_port_get(dev);
+
switch (event) {
case NETDEV_CHANGE:
list_for_each_entry(vlan, &port->vlans, list)
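The macvlan hunks above stop poking a dedicated dev->macvlan_port pointer and instead pass the port as the rx_handler_data argument of netdev_rx_handler_register(), recovering it under RCU in the receive handler. A compact sketch of the same pattern with made-up foo_* names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_port {
	struct net_device *dev;
	/* ... hypothetical per-port state ... */
};

/* rx path: recover the per-port data registered below, under RCU */
static struct sk_buff *foo_handle_frame(struct sk_buff *skb)
{
	struct foo_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!port)
		return skb;
	/* ... demux to the right upper device ... */
	return skb;
}

/* setup path (rtnl held): hand the per-port data to the core */
static int foo_port_attach(struct net_device *dev, struct foo_port *port)
{
	return netdev_rx_handler_register(dev, foo_handle_frame, port);
}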
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 8e9704f..869f0ea 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -247,7 +247,7 @@ static const struct net_device_ops mipsnet_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __init mipsnet_probe(struct platform_device *dev)
+static int __devinit mipsnet_probe(struct platform_device *dev)
{
struct net_device *netdev;
int err;
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index d5afd03..b275238 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -387,6 +387,42 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
}
+static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int rc = 0;
+ int changed = 0;
+
+ if (data & ~ETH_FLAG_LRO)
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_LRO) {
+ if (mdev->profile.num_lro == 0)
+ return -EOPNOTSUPP;
+ if (!(dev->features & NETIF_F_LRO))
+ changed = 1;
+ } else if (dev->features & NETIF_F_LRO) {
+ changed = 1;
+ }
+
+ if (changed) {
+ if (netif_running(dev)) {
+ mutex_lock(&mdev->state_lock);
+ mlx4_en_stop_port(dev);
+ }
+ dev->features ^= NETIF_F_LRO;
+ if (netif_running(dev)) {
+ rc = mlx4_en_start_port(dev);
+ if (rc)
+ en_err(priv, "Failed to restart port\n");
+ mutex_unlock(&mdev->state_lock);
+ }
+ }
+
+ return rc;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -415,7 +451,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
+ .set_flags = mlx4_ethtool_op_set_flags,
};
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index e345ec8..82b720f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1636,6 +1636,11 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
}
}
+static int mv643xx_eth_set_flags(struct net_device *dev, u32 data)
+{
+ return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO);
+}
+
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
@@ -1661,7 +1666,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.get_strings = mv643xx_eth_get_strings,
.get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
+ .set_flags = mv643xx_eth_set_flags,
.get_sset_count = mv643xx_eth_get_sset_count,
};
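Several hunks above (ixgbe, mlx4_en, mv643xx_eth, myri10ge, netxen, niu) follow the same pattern: ethtool_op_set_flags() now takes a mask of the flags the driver actually supports, so each driver installs a thin wrapper instead of the generic op. A minimal hypothetical wrapper:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* only LRO may be toggled on this hypothetical device; any other bit in
 * 'data' makes ethtool_op_set_flags() fail */
static int foo_set_flags(struct net_device *dev, u32 data)
{
	return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_flags = ethtool_op_get_flags,
	.set_flags = foo_set_flags,
};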
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e0b47cc..d771d16 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1730,8 +1730,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
if (csum_enabled)
mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
else {
- u32 flags = ethtool_op_get_flags(netdev);
- err = ethtool_op_set_flags(netdev, (flags & ~ETH_FLAG_LRO));
+ netdev->features &= ~NETIF_F_LRO;
mgp->csum_flag = 0;
}
@@ -1900,6 +1899,11 @@ static u32 myri10ge_get_msglevel(struct net_device *netdev)
return mgp->msg_enable;
}
+static int myri10ge_set_flags(struct net_device *netdev, u32 value)
+{
+ return ethtool_op_set_flags(netdev, value, ETH_FLAG_LRO);
+}
+
static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_settings = myri10ge_get_settings,
.get_drvinfo = myri10ge_get_drvinfo,
@@ -1920,7 +1924,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
.set_msglevel = myri10ge_set_msglevel,
.get_msglevel = myri10ge_get_msglevel,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags
+ .set_flags = myri10ge_set_flags
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index f26e547..3a41b6a 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -629,7 +629,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
if (addr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
netdev->name);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out_free;
}
tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 20f7c58..b30de24 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -887,12 +887,19 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
struct netxen_adapter *adapter = netdev_priv(netdev);
int hw_lro;
- if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
+ if (data & ~ETH_FLAG_LRO)
return -EINVAL;
- ethtool_op_set_flags(netdev, data);
+ if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
+ return -EINVAL;
- hw_lro = (data & ETH_FLAG_LRO) ? NETXEN_NIC_LRO_ENABLED : 0;
+ if (data & ETH_FLAG_LRO) {
+ hw_lro = NETXEN_NIC_LRO_ENABLED;
+ netdev->features |= NETIF_F_LRO;
+ } else {
+ hw_lro = 0;
+ netdev->features &= ~NETIF_F_LRO;
+ }
if (netxen_config_hw_lro(adapter, hw_lro))
return -EIO;
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 5c496f8..29d7b93 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -1159,9 +1159,6 @@ netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
window = CRB_HI(off);
- if (adapter->ahw.crb_win == window)
- return;
-
writel(window, addr);
if (readl(addr) != window) {
if (printk_ratelimit())
@@ -1169,7 +1166,6 @@ netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
"failed to set CRB window to %d off 0x%lx\n",
window, off);
}
- adapter->ahw.crb_win = window;
}
static void __iomem *
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 045a7c8..c865dda 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -218,7 +218,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
if (cmd_buf_arr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
netdev->name);
- return -ENOMEM;
+ goto err_out;
}
memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
@@ -230,7 +230,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
if (rds_ring == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
netdev->name);
- return -ENOMEM;
+ goto err_out;
}
recv_ctx->rds_rings = rds_ring;
@@ -1805,9 +1805,10 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
netxen_ctx_msg msg = 0;
struct list_head *head;
+ spin_lock(&rds_ring->lock);
+
producer = rds_ring->producer;
- spin_lock(&rds_ring->lock);
head = &rds_ring->free_list;
while (!list_empty(head)) {
@@ -1829,7 +1830,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
producer = get_next_index(producer, rds_ring->num_desc);
}
- spin_unlock(&rds_ring->lock);
if (count) {
rds_ring->producer = producer;
@@ -1853,6 +1853,8 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
NETXEN_RCV_PRODUCER_OFFSET), msg);
}
}
+
+ spin_unlock(&rds_ring->lock);
}
static void
@@ -1864,10 +1866,11 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
int producer, count = 0;
struct list_head *head;
- producer = rds_ring->producer;
if (!spin_trylock(&rds_ring->lock))
return;
+ producer = rds_ring->producer;
+
head = &rds_ring->free_list;
while (!list_empty(head)) {
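
The netxen_post_rx_buffers() reordering above widens the rds_ring lock: the producer index is now sampled, advanced and posted to the doorbell inside one critical section, so a concurrent refill cannot interleave between the read and the write-back. The shape of the fix, with hypothetical ring and doorbell names:

    spin_lock(&ring->lock);
    producer = ring->producer;              /* read under the lock ... */
    /* ... refill free-list entries, advancing 'producer' ... */
    ring->producer = producer;              /* ... and publish under the same lock */
    writel(producer, ring->doorbell);       /* hypothetical doorbell register */
    spin_unlock(&ring->lock);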
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 63e8e38..3d523cb 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7920,14 +7920,7 @@ static int niu_phys_id(struct net_device *dev, u32 data)
static int niu_set_flags(struct net_device *dev, u32 data)
{
- if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
- return -EOPNOTSUPP;
-
- if (data & ETH_FLAG_RXHASH)
- dev->features |= NETIF_F_RXHASH;
- else
- dev->features &= ~NETIF_F_RXHASH;
- return 0;
+ return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
}
static const struct ethtool_ops niu_ethtool_ops = {
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 000e792..f4a0f08 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -1067,7 +1067,7 @@ static const struct net_device_ops octeon_mgmt_ops = {
#endif
};
-static int __init octeon_mgmt_probe(struct platform_device *pdev)
+static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
{
struct resource *res_irq;
struct net_device *netdev;
@@ -1124,7 +1124,7 @@ err:
return -ENOENT;
}
-static int __exit octeon_mgmt_remove(struct platform_device *pdev)
+static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
{
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
@@ -1139,7 +1139,7 @@ static struct platform_driver octeon_mgmt_driver = {
.owner = THIS_MODULE,
},
.probe = octeon_mgmt_probe,
- .remove = __exit_p(octeon_mgmt_remove),
+ .remove = __devexit_p(octeon_mgmt_remove),
};
extern void octeon_mdiobus_force_mod_depencency(void);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 6f77a76..bfdef72 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1727,6 +1727,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 7b6fe89..64e6a84 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -322,6 +322,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
return -ENOMEM;
smc = netdev_priv(dev);
smc->p_dev = link;
+ link->priv = dev;
spin_lock_init(&smc->lock);
link->io.NumPorts1 = 16;
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index cecdbbd..4accd83 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -685,7 +685,7 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
}
static struct phy_driver bcm5411_driver = {
- .phy_id = 0x00206070,
+ .phy_id = PHY_ID_BCM5411,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
.features = PHY_GBIT_FEATURES |
@@ -700,7 +700,7 @@ static struct phy_driver bcm5411_driver = {
};
static struct phy_driver bcm5421_driver = {
- .phy_id = 0x002060e0,
+ .phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
.features = PHY_GBIT_FEATURES |
@@ -715,7 +715,7 @@ static struct phy_driver bcm5421_driver = {
};
static struct phy_driver bcm5461_driver = {
- .phy_id = 0x002060c0,
+ .phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
.features = PHY_GBIT_FEATURES |
@@ -730,7 +730,7 @@ static struct phy_driver bcm5461_driver = {
};
static struct phy_driver bcm5464_driver = {
- .phy_id = 0x002060b0,
+ .phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
.features = PHY_GBIT_FEATURES |
@@ -745,7 +745,7 @@ static struct phy_driver bcm5464_driver = {
};
static struct phy_driver bcm5481_driver = {
- .phy_id = 0x0143bca0,
+ .phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
.features = PHY_GBIT_FEATURES |
@@ -760,7 +760,7 @@ static struct phy_driver bcm5481_driver = {
};
static struct phy_driver bcm5482_driver = {
- .phy_id = 0x0143bcb0,
+ .phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
.features = PHY_GBIT_FEATURES |
@@ -834,6 +834,21 @@ static struct phy_driver bcmac131_driver = {
.driver = { .owner = THIS_MODULE },
};
+static struct phy_driver bcm5241_driver = {
+ .phy_id = PHY_ID_BCM5241,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5241",
+ .features = PHY_BASIC_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = brcm_fet_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = brcm_fet_ack_interrupt,
+ .config_intr = brcm_fet_config_intr,
+ .driver = { .owner = THIS_MODULE },
+};
+
static int __init broadcom_init(void)
{
int ret;
@@ -868,8 +883,13 @@ static int __init broadcom_init(void)
ret = phy_driver_register(&bcmac131_driver);
if (ret)
goto out_ac131;
+ ret = phy_driver_register(&bcm5241_driver);
+ if (ret)
+ goto out_5241;
return ret;
+out_5241:
+ phy_driver_unregister(&bcmac131_driver);
out_ac131:
phy_driver_unregister(&bcm57780_driver);
out_57780:
@@ -894,6 +914,7 @@ out_5411:
static void __exit broadcom_exit(void)
{
+ phy_driver_unregister(&bcm5241_driver);
phy_driver_unregister(&bcmac131_driver);
phy_driver_unregister(&bcm57780_driver);
phy_driver_unregister(&bcm50610m_driver);
@@ -910,16 +931,17 @@ module_init(broadcom_init);
module_exit(broadcom_exit);
static struct mdio_device_id broadcom_tbl[] = {
- { 0x00206070, 0xfffffff0 },
- { 0x002060e0, 0xfffffff0 },
- { 0x002060c0, 0xfffffff0 },
- { 0x002060b0, 0xfffffff0 },
- { 0x0143bca0, 0xfffffff0 },
- { 0x0143bcb0, 0xfffffff0 },
+ { PHY_ID_BCM5411, 0xfffffff0 },
+ { PHY_ID_BCM5421, 0xfffffff0 },
+ { PHY_ID_BCM5461, 0xfffffff0 },
+ { PHY_ID_BCM5464, 0xfffffff0 },
+ { PHY_ID_BCM5481, 0xfffffff0 },
+ { PHY_ID_BCM5482, 0xfffffff0 },
{ PHY_ID_BCM50610, 0xfffffff0 },
{ PHY_ID_BCM50610M, 0xfffffff0 },
{ PHY_ID_BCM57780, 0xfffffff0 },
{ PHY_ID_BCMAC131, 0xfffffff0 },
+ { PHY_ID_BCM5241, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index f443d43..bd12ba9 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -85,7 +85,7 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
return 0;
}
-static int __init octeon_mdiobus_probe(struct platform_device *pdev)
+static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
{
struct octeon_mdiobus *bus;
union cvmx_smix_en smi_en;
@@ -143,7 +143,7 @@ err:
return err;
}
-static int __exit octeon_mdiobus_remove(struct platform_device *pdev)
+static int __devexit octeon_mdiobus_remove(struct platform_device *pdev)
{
struct octeon_mdiobus *bus;
union cvmx_smix_en smi_en;
@@ -163,7 +163,7 @@ static struct platform_driver octeon_mdiobus_driver = {
.owner = THIS_MODULE,
},
.probe = octeon_mdiobus_probe,
- .remove = __exit_p(octeon_mdiobus_remove),
+ .remove = __devexit_p(octeon_mdiobus_remove),
};
void octeon_mdiobus_force_mod_depencency(void)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0692f75..8bb7db6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -12,7 +12,8 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
- * Support : ksz9021 , vsc8201, ks8001
+ * Support : ksz9021 1000/100/10 phy from Micrel
+ * ks8001, ks8737, ks8721, ks8041, ks8051 100/10 phy
*/
#include <linux/kernel.h>
@@ -20,37 +21,146 @@
#include <linux/phy.h>
#define PHY_ID_KSZ9021 0x00221611
-#define PHY_ID_VSC8201 0x000FC413
+#define PHY_ID_KS8737 0x00221720
+#define PHY_ID_KS8041 0x00221510
+#define PHY_ID_KS8051 0x00221550
+/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
#define PHY_ID_KS8001 0x0022161A
+/* general Interrupt control/status reg in vendor specific block. */
+#define MII_KSZPHY_INTCS 0x1B
+#define KSZPHY_INTCS_JABBER (1 << 15)
+#define KSZPHY_INTCS_RECEIVE_ERR (1 << 14)
+#define KSZPHY_INTCS_PAGE_RECEIVE (1 << 13)
+#define KSZPHY_INTCS_PARELLEL (1 << 12)
+#define KSZPHY_INTCS_LINK_PARTNER_ACK (1 << 11)
+#define KSZPHY_INTCS_LINK_DOWN (1 << 10)
+#define KSZPHY_INTCS_REMOTE_FAULT (1 << 9)
+#define KSZPHY_INTCS_LINK_UP (1 << 8)
+#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
+ KSZPHY_INTCS_LINK_DOWN)
+
+/* general PHY control reg in vendor specific block. */
+#define MII_KSZPHY_CTRL 0x1F
+/* bitmap of PHY register to set interrupt mode */
+#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
+#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
+#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
+
+static int kszphy_ack_interrupt(struct phy_device *phydev)
+{
+ /* bit[7..0] int status, which is a read and clear register. */
+ int rc;
+
+ rc = phy_read(phydev, MII_KSZPHY_INTCS);
+
+ return (rc < 0) ? rc : 0;
+}
+
+static int kszphy_set_interrupt(struct phy_device *phydev)
+{
+ int temp;
+ temp = (PHY_INTERRUPT_ENABLED == phydev->interrupts) ?
+ KSZPHY_INTCS_ALL : 0;
+ return phy_write(phydev, MII_KSZPHY_INTCS, temp);
+}
+
+static int kszphy_config_intr(struct phy_device *phydev)
+{
+ int temp, rc;
+
+ /* set the interrupt pin active low */
+ temp = phy_read(phydev, MII_KSZPHY_CTRL);
+ temp &= ~KSZPHY_CTRL_INT_ACTIVE_HIGH;
+ phy_write(phydev, MII_KSZPHY_CTRL, temp);
+ rc = kszphy_set_interrupt(phydev);
+ return rc < 0 ? rc : 0;
+}
+
+static int ksz9021_config_intr(struct phy_device *phydev)
+{
+ int temp, rc;
+
+ /* set the interrupt pin active low */
+ temp = phy_read(phydev, MII_KSZPHY_CTRL);
+ temp &= ~KSZ9021_CTRL_INT_ACTIVE_HIGH;
+ phy_write(phydev, MII_KSZPHY_CTRL, temp);
+ rc = kszphy_set_interrupt(phydev);
+ return rc < 0 ? rc : 0;
+}
+
+static int ks8737_config_intr(struct phy_device *phydev)
+{
+ int temp, rc;
+
+ /* set the interrupt pin active low */
+ temp = phy_read(phydev, MII_KSZPHY_CTRL);
+ temp &= ~KS8737_CTRL_INT_ACTIVE_HIGH;
+ phy_write(phydev, MII_KSZPHY_CTRL, temp);
+ rc = kszphy_set_interrupt(phydev);
+ return rc < 0 ? rc : 0;
+}
static int kszphy_config_init(struct phy_device *phydev)
{
return 0;
}
+static struct phy_driver ks8737_driver = {
+ .phy_id = PHY_ID_KS8737,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KS8737",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = ks8737_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver ks8041_driver = {
+ .phy_id = PHY_ID_KS8041,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KS8041",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
+ | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
-static struct phy_driver ks8001_driver = {
- .phy_id = PHY_ID_KS8001,
- .name = "Micrel KS8001",
+static struct phy_driver ks8051_driver = {
+ .phy_id = PHY_ID_KS8051,
.phy_id_mask = 0x00fffff0,
- .features = PHY_BASIC_FEATURES,
- .flags = PHY_POLL,
+ .name = "Micrel KS8051",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
+ | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
};
-static struct phy_driver vsc8201_driver = {
- .phy_id = PHY_ID_VSC8201,
- .name = "Micrel VSC8201",
+static struct phy_driver ks8001_driver = {
+ .phy_id = PHY_ID_KS8001,
+ .name = "Micrel KS8001 or KS8721",
.phy_id_mask = 0x00fffff0,
- .features = PHY_BASIC_FEATURES,
- .flags = PHY_POLL,
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
};
@@ -58,11 +168,14 @@ static struct phy_driver ksz9021_driver = {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000fff10,
.name = "Micrel KSZ9021 Gigabit PHY",
- .features = PHY_GBIT_FEATURES | SUPPORTED_Pause,
- .flags = PHY_POLL,
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
+ | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = ksz9021_config_intr,
.driver = { .owner = THIS_MODULE, },
};
@@ -73,17 +186,29 @@ static int __init ksphy_init(void)
ret = phy_driver_register(&ks8001_driver);
if (ret)
goto err1;
- ret = phy_driver_register(&vsc8201_driver);
+
+ ret = phy_driver_register(&ksz9021_driver);
if (ret)
goto err2;
- ret = phy_driver_register(&ksz9021_driver);
+ ret = phy_driver_register(&ks8737_driver);
if (ret)
goto err3;
+ ret = phy_driver_register(&ks8041_driver);
+ if (ret)
+ goto err4;
+ ret = phy_driver_register(&ks8051_driver);
+ if (ret)
+ goto err5;
+
return 0;
+err5:
+ phy_driver_unregister(&ks8041_driver);
+err4:
+ phy_driver_unregister(&ks8737_driver);
err3:
- phy_driver_unregister(&vsc8201_driver);
+ phy_driver_unregister(&ksz9021_driver);
err2:
phy_driver_unregister(&ks8001_driver);
err1:
@@ -93,8 +218,10 @@ err1:
static void __exit ksphy_exit(void)
{
phy_driver_unregister(&ks8001_driver);
- phy_driver_unregister(&vsc8201_driver);
+ phy_driver_unregister(&ks8737_driver);
phy_driver_unregister(&ksz9021_driver);
+ phy_driver_unregister(&ks8041_driver);
+ phy_driver_unregister(&ks8051_driver);
}
module_init(ksphy_init);
@@ -106,8 +233,10 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000fff10 },
- { PHY_ID_VSC8201, 0x00fffff0 },
{ PHY_ID_KS8001, 0x00fffff0 },
+ { PHY_ID_KS8737, 0x00fffff0 },
+ { PHY_ID_KS8041, 0x00fffff0 },
+ { PHY_ID_KS8051, 0x00fffff0 },
{ }
};
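
The Micrel driver registration above uses the usual staircase unwind: each phy_driver_register() that succeeds gets a matching label, so a later failure unregisters everything registered so far in reverse order. Reduced to two drivers, with illustrative names:

    ret = phy_driver_register(&first_driver);
    if (ret)
            goto err1;
    ret = phy_driver_register(&second_driver);
    if (ret)
            goto err2;
    return 0;
    err2:
            phy_driver_unregister(&first_driver);
    err1:
            return ret;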
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 02db363..60ea7cb 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 3
-#define QLCNIC_LINUX_VERSIONID "5.0.3"
+#define _QLCNIC_LINUX_SUBVERSION 6
+#define QLCNIC_LINUX_VERSIONID "5.0.6"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
@@ -68,6 +68,7 @@
#define QLCNIC_DECODE_VERSION(v) \
QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
+#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
#define QLCNIC_NUM_FLASH_SECTORS (64)
#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
@@ -112,8 +113,10 @@
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
-#define MAX_BUFFERS_PER_CMD 32
-#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
/*
@@ -368,7 +371,7 @@ struct qlcnic_recv_crb {
*/
struct qlcnic_cmd_buffer {
struct sk_buff *skb;
- struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
u32 frag_count;
};
@@ -378,7 +381,6 @@ struct qlcnic_rx_buffer {
struct sk_buff *skb;
u64 dma;
u16 ref_handle;
- u16 state;
};
/* Board types */
@@ -420,7 +422,6 @@ struct qlcnic_adapter_stats {
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
- u64 null_skb;
u64 null_rxbuf;
u64 rx_dma_map_error;
u64 tx_dma_map_error;
@@ -567,11 +568,12 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_LSO (1 << 6)
#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+#define QLCNIC_CAP0_VALIDOFF (1 << 11)
/*
* Context state
*/
-
+#define QLCNIC_HOST_CTX_STATE_FREED 0
#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
/*
@@ -602,9 +604,10 @@ struct qlcnic_hostrq_rx_ctx {
__le32 sds_ring_offset; /* Offset to SDS config */
__le16 num_rds_rings; /* Count of RDS rings */
__le16 num_sds_rings; /* Count of SDS rings */
- __le16 rsvd1; /* Padding */
- __le16 rsvd2; /* Padding */
- u8 reserved[128]; /* reserve space for future expansion*/
+ __le16 valid_field_offset;
+ u8 txrx_sds_binding;
+ u8 msix_handler;
+ u8 reserved[128]; /* reserve space for future expansion*/
/* MUST BE 64-bit aligned.
The following is packed:
- N hostrq_rds_rings
@@ -891,7 +894,7 @@ struct qlcnic_mac_req {
#define QLCNIC_LRO_ENABLED 0x08
#define QLCNIC_BRIDGE_ENABLED 0X10
#define QLCNIC_DIAG_ENABLED 0x20
-#define QLCNIC_NPAR_ENABLED 0x40
+#define QLCNIC_ESWITCH_ENABLED 0x40
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -931,6 +934,7 @@ struct qlcnic_adapter {
u8 rx_csum;
u8 portnum;
u8 physical_port;
+ u8 reset_context;
u8 mc_enabled;
u8 max_mc_count;
@@ -955,8 +959,6 @@ struct qlcnic_adapter {
u16 switch_mode;
u16 max_tx_ques;
u16 max_rx_ques;
- u16 min_tx_bw;
- u16 max_tx_bw;
u16 max_mtu;
u32 fw_hal_version;
@@ -980,7 +982,7 @@ struct qlcnic_adapter {
u64 dev_rst_time;
- struct qlcnic_pci_info *npars;
+ struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
@@ -998,8 +1000,6 @@ struct qlcnic_adapter {
struct delayed_work fw_work;
- struct work_struct tx_timeout_task;
-
struct qlcnic_nic_intr_coalesce coal;
unsigned long state;
@@ -1040,6 +1040,18 @@ struct qlcnic_pci_info {
u8 reserved2[106];
};
+struct qlcnic_npar_info {
+ u16 vlan_id;
+ u8 phy_port;
+ u8 type;
+ u8 active;
+ u8 enable_pm;
+ u8 dest_npar;
+ u8 host_vlan_tag;
+ u8 promisc_mode;
+ u8 discard_tagged;
+ u8 mac_learning;
+};
struct qlcnic_eswitch {
u8 port;
u8 active_vports;
@@ -1055,6 +1067,63 @@ struct qlcnic_eswitch {
#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
};
+
+/* Return codes for Error handling */
+#define QL_STATUS_INVALID_PARAM -1
+
+#define MAX_BW 10000
+#define MIN_BW 100
+#define MAX_VLAN_ID 4095
+#define MIN_VLAN_ID 2
+#define MAX_TX_QUEUES 1
+#define MAX_RX_QUEUES 4
+#define DEFAULT_MAC_LEARN 1
+
+#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW \
+ && (bw % 100) == 0)
+#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
+#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
+#define IS_VALID_MODE(mode) (mode == 0 || mode == 1)
+
+struct qlcnic_pci_func_cfg {
+ u16 func_type;
+ u16 min_bw;
+ u16 max_bw;
+ u16 port_num;
+ u8 pci_func;
+ u8 func_state;
+ u8 def_mac_addr[6];
+};
+
+struct qlcnic_npar_func_cfg {
+ u32 fw_capab;
+ u16 port_num;
+ u16 min_bw;
+ u16 max_bw;
+ u16 max_tx_queues;
+ u16 max_rx_queues;
+ u8 pci_func;
+ u8 op_mode;
+};
+
+struct qlcnic_pm_func_cfg {
+ u8 pci_func;
+ u8 action;
+ u8 dest_npar;
+ u8 reserved[5];
+};
+
+struct qlcnic_esw_func_cfg {
+ u16 vlan_id;
+ u8 pci_func;
+ u8 host_vlan_tag;
+ u8 promisc_mode;
+ u8 discard_tagged;
+ u8 mac_learning;
+ u8 reserved;
+};
+
int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
@@ -1102,13 +1171,13 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
/* Functions from qlcnic_init.c */
-int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
+int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1121,6 +1190,10 @@ void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
@@ -1163,9 +1236,9 @@ void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
/* Management functions */
int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*);
int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
-int qlcnic_get_nic_info(struct qlcnic_adapter *, u8);
+int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_get_pci_info(struct qlcnic_adapter *);
+int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
int qlcnic_reset_partition(struct qlcnic_adapter *, u8);
/* eSwitch management functions */
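
QLCNIC_MIN_FW_VERSION above builds on the packing macros already defined in this header: QLCNIC_VERSION_CODE(4, 4, 2) evaluates to (4 << 24) + (4 << 16) + 2 = 0x04040002, and QLCNIC_DECODE_VERSION() rebuilds the same encoding from the flashed word, which (as implied by the macro) stores the major number in the low byte, the minor in the next byte and the build in the upper half. The ver < QLCNIC_MIN_FW_VERSION test added in qlcnic_check_flash_fw_ver() therefore compares like-for-like encodings.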
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 1e1dc58..cdd44b4 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -152,9 +152,14 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
- cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
+ | QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+ prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
+ msix_handler);
+ prq->txrx_sds_binding = nsds_rings - 1;
+
prq->capabilities[0] = cpu_to_le32(cap);
prq->host_int_crb_mode =
cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
@@ -175,6 +180,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < nrds_rings; i++) {
rds_ring = &recv_ctx->rds_rings[i];
+ rds_ring->producer = 0;
prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
@@ -188,6 +194,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < nsds_rings; i++) {
sds_ring = &recv_ctx->sds_rings[i];
+ sds_ring->consumer = 0;
+ memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
@@ -216,12 +224,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
rds_ring = &recv_ctx->rds_rings[i];
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
- if (adapter->fw_hal_version == QLCNIC_FW_BASE)
- rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
- QLCNIC_REG(reg - 0x200));
- else
- rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 +
- reg;
+ rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + reg;
}
prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -233,16 +236,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
- if (adapter->fw_hal_version == QLCNIC_FW_BASE) {
- sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
- QLCNIC_REG(reg - 0x200));
- sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
- QLCNIC_REG(reg2 - 0x200));
- } else {
- sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 +
- reg;
- sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
- }
+ sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + reg;
+ sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
}
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -272,6 +267,8 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
dev_err(&adapter->pdev->dev,
"Failed to destroy rx ctx in firmware\n");
}
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
}
static int
@@ -288,6 +285,11 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
dma_addr_t rq_phys_addr, rsp_phys_addr;
struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ /* reset host resources */
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+ *(tx_ring->hw_consumer) = 0;
+
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
rq_addr = pci_alloc_consistent(adapter->pdev,
rq_size, &rq_phys_addr);
@@ -337,12 +339,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
if (err == QLCNIC_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
- if (adapter->fw_hal_version == QLCNIC_FW_BASE)
- tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
- QLCNIC_REG(temp - 0x200));
- else
- tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 +
- temp;
+ tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + temp;
adapter->tx_context_id =
le16_to_cpu(prsp->context_id);
@@ -471,15 +468,6 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
sds_ring->desc_head = (struct status_desc *)addr;
}
-
- err = qlcnic_fw_cmd_create_rx_ctx(adapter);
- if (err)
- goto err_out_free;
- err = qlcnic_fw_cmd_create_tx_ctx(adapter);
- if (err)
- goto err_out_free;
-
- set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
return 0;
err_out_free:
@@ -487,15 +475,27 @@ err_out_free:
return err;
}
-void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
- struct qlcnic_recv_context *recv_ctx;
- struct qlcnic_host_rds_ring *rds_ring;
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
- int ring;
+ int err;
+
+ err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+ if (err) {
+ qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ return err;
+ }
+ set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ return 0;
+}
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
+{
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_destroy_rx_ctx(adapter);
qlcnic_fw_cmd_destroy_tx_ctx(adapter);
@@ -503,6 +503,15 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
/* Allow dma queues to drain after context reset */
msleep(20);
}
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
recv_ctx = &adapter->recv_ctx;
@@ -589,11 +598,10 @@ int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
0,
QLCNIC_CDRP_CMD_MAC_ADDRESS);
- if (err == QLCNIC_RCODE_SUCCESS) {
+ if (err == QLCNIC_RCODE_SUCCESS)
qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
QLCNIC_ARG2_CRB_OFFSET, 0, mac);
- dev_info(&adapter->pdev->dev, "MAC address: %pM\n", mac);
- } else {
+ else {
dev_err(&adapter->pdev->dev,
"Failed to get mac address%d\n", err);
err = -EIO;
@@ -603,7 +611,8 @@ int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
}
/* Get info of a NIC partition */
-int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, u8 func_id)
+int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
{
int err;
dma_addr_t nic_dma_t;
@@ -627,24 +636,23 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, u8 func_id)
QLCNIC_CDRP_CMD_GET_NIC_INFO);
if (err == QLCNIC_RCODE_SUCCESS) {
- adapter->physical_port = le16_to_cpu(nic_info->phys_port);
- adapter->switch_mode = le16_to_cpu(nic_info->switch_mode);
- adapter->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
- adapter->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
- adapter->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
- adapter->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
- adapter->max_mtu = le16_to_cpu(nic_info->max_mtu);
- adapter->capabilities = le32_to_cpu(nic_info->capabilities);
- adapter->max_mac_filters = nic_info->max_mac_filters;
+ npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
+ npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
+ npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+ npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+ npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+ npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
+ npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
dev_info(&adapter->pdev->dev,
"phy port: %d switch_mode: %d,\n"
"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
"\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
- adapter->physical_port, adapter->switch_mode,
- adapter->max_tx_ques, adapter->max_rx_ques,
- adapter->min_tx_bw, adapter->max_tx_bw,
- adapter->max_mtu, adapter->capabilities);
+ npar_info->phys_port, npar_info->switch_mode,
+ npar_info->max_tx_ques, npar_info->max_rx_ques,
+ npar_info->min_tx_bw, npar_info->max_tx_bw,
+ npar_info->max_mtu, npar_info->capabilities);
} else {
dev_err(&adapter->pdev->dev,
"Failed to get nic info%d\n", err);
@@ -659,7 +667,6 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, u8 func_id)
int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
{
int err = -EIO;
- u32 func_state;
dma_addr_t nic_dma_t;
void *nic_info_addr;
struct qlcnic_info *nic_info;
@@ -668,17 +675,6 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return err;
- if (qlcnic_api_lock(adapter))
- return err;
-
- func_state = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- if (QLC_DEV_CHECK_ACTIVE(func_state, nic->pci_func)) {
- qlcnic_api_unlock(adapter);
- return err;
- }
-
- qlcnic_api_unlock(adapter);
-
nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
&nic_dma_t);
if (!nic_info_addr)
@@ -703,7 +699,7 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
adapter->fw_hal_version,
MSD(nic_dma_t),
LSD(nic_dma_t),
- nic_size,
+ ((nic->pci_func << 16) | nic_size),
QLCNIC_CDRP_CMD_SET_NIC_INFO);
if (err != QLCNIC_RCODE_SUCCESS) {
@@ -717,7 +713,8 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
}
/* Get PCI Info of a partition */
-int qlcnic_get_pci_info(struct qlcnic_adapter *adapter)
+int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
{
int err = 0, i;
dma_addr_t pci_info_dma_t;
@@ -732,21 +729,6 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter)
return -ENOMEM;
memset(pci_info_addr, 0, pci_size);
- if (!adapter->npars)
- adapter->npars = kzalloc(pci_size, GFP_KERNEL);
- if (!adapter->npars) {
- err = -ENOMEM;
- goto err_npar;
- }
-
- if (!adapter->eswitch)
- adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
- QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
- if (!adapter->eswitch) {
- err = -ENOMEM;
- goto err_eswitch;
- }
-
npar = (struct qlcnic_pci_info *) pci_info_addr;
err = qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
@@ -757,31 +739,24 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter)
QLCNIC_CDRP_CMD_GET_PCI_INFO);
if (err == QLCNIC_RCODE_SUCCESS) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++) {
- adapter->npars[i].id = le32_to_cpu(npar->id);
- adapter->npars[i].active = le32_to_cpu(npar->active);
- adapter->npars[i].type = le32_to_cpu(npar->type);
- adapter->npars[i].default_port =
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+ pci_info->id = le32_to_cpu(npar->id);
+ pci_info->active = le32_to_cpu(npar->active);
+ pci_info->type = le32_to_cpu(npar->type);
+ pci_info->default_port =
le32_to_cpu(npar->default_port);
- adapter->npars[i].tx_min_bw =
+ pci_info->tx_min_bw =
le32_to_cpu(npar->tx_min_bw);
- adapter->npars[i].tx_max_bw =
+ pci_info->tx_max_bw =
le32_to_cpu(npar->tx_max_bw);
- memcpy(adapter->npars[i].mac, npar->mac, ETH_ALEN);
+ memcpy(pci_info->mac, npar->mac, ETH_ALEN);
}
} else {
dev_err(&adapter->pdev->dev,
"Failed to get PCI Info%d\n", err);
- kfree(adapter->npars);
err = -EIO;
}
- goto err_npar;
-
-err_eswitch:
- kfree(adapter->npars);
- adapter->npars = NULL;
-err_npar:
pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
pci_info_dma_t);
return err;
@@ -999,9 +974,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
"Failed to configure eswitch port%d\n", eswitch->port);
- eswitch->flags |= QLCNIC_SWITCH_ENABLE;
} else {
- eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
dev_info(&adapter->pdev->dev,
"Configured eSwitch for port %d\n", eswitch->port);
}
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 3e4822a..f8e39e4 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,8 +69,6 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
- {"null skb",
- QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
{"null rxbuf",
QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
@@ -350,7 +348,7 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
for (i = 0; diag_registers[i] != -1; i++)
regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
@@ -835,6 +833,9 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
struct qlcnic_adapter *adapter = netdev_priv(dev);
int ret;
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return -EIO;
+
ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
if (ret) {
dev_err(&adapter->pdev->dev,
@@ -906,7 +907,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return -EINVAL;
/*
@@ -982,12 +983,19 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int hw_lro;
- if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
+ if (data & ~ETH_FLAG_LRO)
return -EINVAL;
- ethtool_op_set_flags(netdev, data);
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
+ return -EINVAL;
- hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
+ if (data & ETH_FLAG_LRO) {
+ hw_lro = QLCNIC_LRO_ENABLED;
+ netdev->features |= NETIF_F_LRO;
+ } else {
+ hw_lro = 0;
+ netdev->features &= ~NETIF_F_LRO;
+ }
if (qlcnic_config_hw_lro(adapter, hw_lro))
return -EIO;
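
Both the netxen and the qlcnic set_flags handlers in this series adopt the same ordering once the generic helper no longer updates features on their behalf: reject unknown flag bits first, check firmware capability, and only then toggle the hardware LRO path together with NETIF_F_LRO. Condensed, with hypothetical helper names standing in for the driver-specific calls:

    if (data & ~ETH_FLAG_LRO)
            return -EINVAL;                 /* unknown flag bits */
    if (!fw_supports_hw_lro(adapter))       /* hypothetical capability test */
            return -EINVAL;
    if (data & ETH_FLAG_LRO) {
            hw_lro = 1;
            netdev->features |= NETIF_F_LRO;
    } else {
            hw_lro = 0;
            netdev->features &= ~NETIF_F_LRO;
    }
    return configure_hw_lro(adapter, hw_lro) ? -EIO : 0;   /* hypothetical */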
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 7b81cab..15fc320 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -778,12 +778,6 @@ enum {
QLCNIC_NON_PRIV_FUNC = 2
};
-/* FW HAL api version */
-enum {
- QLCNIC_FW_BASE = 1,
- QLCNIC_FW_NPAR = 2
-};
-
#define QLC_DEV_DRV_DEFAULT 0x11111111
#define LSB(x) ((uint8_t)(x))
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index f776956..e08c8b0 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -327,7 +327,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
i = 0;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return -EIO;
tx_ring = adapter->tx_ring;
@@ -338,9 +338,15 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
- __netif_tx_unlock_bh(tx_ring->txq);
- adapter->stats.xmit_off++;
- return -EBUSY;
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ adapter->stats.xmit_off++;
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
}
do {
@@ -407,10 +413,15 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
return -ENOMEM;
}
memcpy(cur->mac_addr, addr, ETH_ALEN);
- list_add_tail(&cur->list, &adapter->mac_list);
- return qlcnic_sre_macaddr_change(adapter,
- cur->mac_addr, QLCNIC_MAC_ADD);
+ if (qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_ADD)) {
+ kfree(cur);
+ return -EIO;
+ }
+
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return 0;
}
void qlcnic_set_multi(struct net_device *netdev)
@@ -420,7 +431,7 @@ void qlcnic_set_multi(struct net_device *netdev)
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
qlcnic_nic_add_mac(adapter, adapter->mac_addr);
@@ -760,7 +771,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
* Out: 'off' is 2M pci map addr
* side effect: lock crb window
*/
-static void
+static int
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
u32 window;
@@ -769,6 +780,10 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
off -= QLCNIC_PCI_CRBSPACE;
window = CRB_HI(off);
+ if (window == 0) {
+ dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
+ return -EIO;
+ }
writel(window, addr);
if (readl(addr) != window) {
@@ -776,7 +791,9 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
dev_warn(&adapter->pdev->dev,
"failed to set CRB window to %d off 0x%lx\n",
window, off);
+ return -EIO;
}
+ return 0;
}
int
@@ -797,11 +814,12 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
/* indirect access */
write_lock_irqsave(&adapter->ahw.crb_lock, flags);
crb_win_lock(adapter);
- qlcnic_pci_set_crbwindow_2M(adapter, off);
- writel(data, addr);
+ rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
+ if (!rv)
+ writel(data, addr);
crb_win_unlock(adapter);
write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
- return 0;
+ return rv;
}
dev_err(&adapter->pdev->dev,
@@ -815,7 +833,7 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
unsigned long flags;
int rv;
- u32 data;
+ u32 data = -1;
void __iomem *addr = NULL;
rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
@@ -827,8 +845,8 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
/* indirect access */
write_lock_irqsave(&adapter->ahw.crb_lock, flags);
crb_win_lock(adapter);
- qlcnic_pci_set_crbwindow_2M(adapter, off);
- data = readl(addr);
+ if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
+ data = readl(addr);
crb_win_unlock(adapter);
write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
return data;
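
The qlcnic_send_cmd_descs() change above replaces an unconditional stop-and-fail with the standard stop/recheck idiom: stop the queue, issue a full barrier so the recheck pairs with the consumer updates on the completion path, and give up only if the ring is still full. Reduced to its core, where tx_avail() is a stand-in for the driver's own ring accounting:

    if (needed >= tx_avail(ring)) {
            netif_tx_stop_queue(ring->txq);
            smp_mb();                               /* pairs with the completion path */
            if (tx_avail(ring) > needed)
                    netif_tx_wake_queue(ring->txq); /* space freed while stopping */
            else
                    return -EBUSY;                  /* genuinely full */
    }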
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 635c990..75ba744 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -112,18 +112,45 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
rds_ring = &recv_ctx->rds_rings[ring];
for (i = 0; i < rds_ring->num_desc; ++i) {
rx_buf = &(rds_ring->rx_buf_arr[i]);
- if (rx_buf->state == QLCNIC_BUFFER_FREE)
+ if (rx_buf->skb == NULL)
continue;
+
pci_unmap_single(adapter->pdev,
rx_buf->dma,
rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
- if (rx_buf->skb != NULL)
- dev_kfree_skb_any(rx_buf->skb);
+
+ dev_kfree_skb_any(rx_buf->skb);
}
}
}
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ spin_lock(&rds_ring->lock);
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf++;
+ }
+
+ spin_unlock(&rds_ring->lock);
+ }
+}
+
void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
{
struct qlcnic_cmd_buffer *cmd_buf;
@@ -181,7 +208,9 @@ skip_rds:
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
kfree(adapter->tx_ring);
+ adapter->tx_ring = NULL;
}
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -264,7 +293,6 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
list_add_tail(&rx_buf->list,
&rds_ring->free_list);
rx_buf->ref_handle = i;
- rx_buf->state = QLCNIC_BUFFER_FREE;
rx_buf++;
}
spin_lock_init(&rds_ring->lock);
@@ -413,7 +441,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
/* resetall */
qlcnic_rom_lock(adapter);
- QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
qlcnic_rom_unlock(adapter);
if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
@@ -520,16 +548,14 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
int timeo;
u32 val;
- if (adapter->fw_hal_version == QLCNIC_FW_BASE) {
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
- val = QLC_DEV_GET_DRV(val, adapter->portnum);
- if ((val & 0x3) != QLCNIC_TYPE_NIC) {
- dev_err(&adapter->pdev->dev,
- "Not an Ethernet NIC func=%u\n", val);
- return -EIO;
- }
- adapter->physical_port = (val >> 2);
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_DEV_GET_DRV(val, adapter->portnum);
+ if ((val & 0x3) != QLCNIC_TYPE_NIC) {
+ dev_err(&adapter->pdev->dev,
+ "Not an Ethernet NIC func=%u\n", val);
+ return -EIO;
}
+ adapter->physical_port = (val >> 2);
if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
timeo = 30;
@@ -543,16 +569,34 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
return 0;
}
+int
+qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
+{
+ u32 ver = -1, min_ver;
+
+ qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+
+ ver = QLCNIC_DECODE_VERSION(ver);
+ min_ver = QLCNIC_MIN_FW_VERSION;
+
+ if (ver < min_ver) {
+ dev_err(&adapter->pdev->dev,
+ "firmware version %d.%d.%d unsupported."
+ "Min supported version %d.%d.%d\n",
+ _major(ver), _minor(ver), _build(ver),
+ _major(min_ver), _minor(min_ver), _build(min_ver));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
- u32 capability, flashed_ver;
+ u32 capability;
capability = 0;
- qlcnic_rom_fast_read(adapter,
- QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
- flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
-
capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
return 1;
@@ -1006,7 +1050,7 @@ static int
qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
{
__le32 val;
- u32 ver, min_ver, bios, min_size;
+ u32 ver, bios, min_size;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
@@ -1028,12 +1072,9 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
return -EINVAL;
val = qlcnic_get_fw_version(adapter);
-
- min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
-
ver = QLCNIC_DECODE_VERSION(val);
- if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
+ if (ver < QLCNIC_MIN_FW_VERSION) {
dev_err(&pdev->dev,
"%s: firmware version %d.%d.%d unsupported\n",
fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
@@ -1121,7 +1162,7 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
adapter->fw = NULL;
}
-int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
{
u32 val;
int retries = 60;
@@ -1146,7 +1187,8 @@ int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
out_err:
- dev_err(&adapter->pdev->dev, "firmware init failed\n");
+ dev_err(&adapter->pdev->dev, "Command Peg initialization not "
+ "complete, state: 0x%x.\n", val);
return -EIO;
}
@@ -1179,6 +1221,10 @@ int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
{
int err;
+ err = qlcnic_cmd_peg_ready(adapter);
+ if (err)
+ return err;
+
err = qlcnic_receive_peg_ready(adapter);
if (err)
return err;
@@ -1264,14 +1310,12 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
dma_addr_t dma;
struct pci_dev *pdev = adapter->pdev;
- buffer->skb = dev_alloc_skb(rds_ring->skb_size);
- if (!buffer->skb) {
+ skb = dev_alloc_skb(rds_ring->skb_size);
+ if (!skb) {
adapter->stats.skb_alloc_failure++;
return -ENOMEM;
}
- skb = buffer->skb;
-
skb_reserve(skb, 2);
dma = pci_map_single(pdev, skb->data,
@@ -1280,13 +1324,11 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
if (pci_dma_mapping_error(pdev, dma)) {
adapter->stats.rx_dma_map_error++;
dev_kfree_skb_any(skb);
- buffer->skb = NULL;
return -ENOMEM;
}
buffer->skb = skb;
buffer->dma = dma;
- buffer->state = QLCNIC_BUFFER_BUSY;
return 0;
}
@@ -1299,14 +1341,15 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
buffer = &rds_ring->rx_buf_arr[index];
+ if (unlikely(buffer->skb == NULL)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
- if (!skb) {
- adapter->stats.null_skb++;
- goto no_skb;
- }
if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
@@ -1318,8 +1361,7 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb->dev = adapter->netdev;
buffer->skb = NULL;
-no_skb:
- buffer->state = QLCNIC_BUFFER_FREE;
+
return skb;
}
@@ -1494,7 +1536,7 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
WARN_ON(desc_cnt > 1);
- if (rxbuf)
+ if (likely(rxbuf))
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
else
adapter->stats.null_rxbuf++;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 99371bc..18e2b2e 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -35,14 +35,14 @@
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
-MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
char qlcnic_driver_name[] = "qlcnic";
-static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
- QLCNIC_LINUX_VERSIONID;
+static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
+ "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
@@ -75,7 +75,6 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
-static void qlcnic_tx_timeout_task(struct work_struct *work);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
@@ -83,6 +82,7 @@ static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif
@@ -131,12 +131,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
writel(tx_ring->producer, tx_ring->crb_cmd_producer);
-
- if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
- netif_stop_queue(adapter->netdev);
- smp_mb();
- adapter->stats.xmit_off++;
- }
}
static const u32 msi_tgt_status[8] = {
@@ -195,8 +189,13 @@ qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
+
+ if (ring == adapter->max_sds_rings - 1)
+ netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
+ QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
}
return 0;
@@ -346,7 +345,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- if (netif_running(netdev)) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_detach(netdev);
qlcnic_napi_disable(adapter);
}
@@ -355,7 +354,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
qlcnic_set_multi(adapter->netdev);
- if (netif_running(netdev)) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_attach(netdev);
qlcnic_napi_enable(adapter);
}
@@ -378,15 +377,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
};
static struct qlcnic_nic_template qlcnic_ops = {
- .get_mac_addr = qlcnic_get_mac_addr,
- .config_bridged_mode = qlcnic_config_bridged_mode,
- .config_led = qlcnic_config_led,
- .set_ilb_mode = qlcnic_set_ilb_mode,
- .clear_ilb_mode = qlcnic_clear_ilb_mode,
- .start_firmware = qlcnic_start_firmware
-};
-
-static struct qlcnic_nic_template qlcnic_pf_ops = {
.get_mac_addr = qlcnic_get_mac_address,
.config_bridged_mode = qlcnic_config_bridged_mode,
.config_led = qlcnic_config_led,
@@ -486,6 +476,53 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
}
static int
+qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+ int i, ret = 0;
+ u8 pfn;
+
+ if (!adapter->npars)
+ adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+ QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+ if (!adapter->npars)
+ return -ENOMEM;
+
+ if (!adapter->eswitch)
+ adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+ QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
+ if (!adapter->eswitch) {
+ ret = -ENOMEM;
+ goto err_eswitch;
+ }
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (!ret) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pfn = pci_info[i].id;
+ if (pfn >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+ adapter->npars[pfn].active = pci_info[i].active;
+ adapter->npars[pfn].type = pci_info[i].type;
+ adapter->npars[pfn].phy_port = pci_info[i].default_port;
+ adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
+ }
+
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+
+ return ret;
+ }
+
+ kfree(adapter->eswitch);
+ adapter->eswitch = NULL;
+err_eswitch:
+ kfree(adapter->npars);
+
+ return ret;
+}
+
+static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
u8 id;
@@ -502,58 +539,40 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
goto err_npar;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- id = adapter->npars[i].id;
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
- id == adapter->ahw.pci_func)
- continue;
- data |= (qlcnic_config_npars & QLC_DEV_SET_DRV(0xf, id));
+ if (qlcnic_config_npars) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ id = i;
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
+ id == adapter->ahw.pci_func)
+ continue;
+ data |= (qlcnic_config_npars &
+ QLC_DEV_SET_DRV(0xf, id));
+ }
+ } else {
+ data = readl(priv_op);
+ data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
+ (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
+ adapter->ahw.pci_func));
}
writel(data, priv_op);
-
err_npar:
qlcnic_api_unlock(adapter);
err_lock:
return ret;
}
-static u8
-qlcnic_set_mgmt_driver(struct qlcnic_adapter *adapter)
-{
- u8 i, ret = 0;
-
- if (qlcnic_get_pci_info(adapter))
- return ret;
- /* Set the eswitch */
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
- if (!qlcnic_get_eswitch_capabilities(adapter, i,
- &adapter->eswitch[i])) {
- ret++;
- qlcnic_toggle_eswitch(adapter, i, ret);
- }
- }
- return ret;
-}
-
static u32
qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
{
void __iomem *msix_base_addr;
void __iomem *priv_op;
+ struct qlcnic_info nic_info;
u32 func;
u32 msix_base;
u32 op_mode, priv_level;
/* Determine FW API version */
adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
- if (adapter->fw_hal_version == ~0) {
- adapter->nic_ops = &qlcnic_ops;
- adapter->fw_hal_version = QLCNIC_FW_BASE;
- adapter->ahw.pci_func = PCI_FUNC(adapter->pdev->devfn);
- dev_info(&adapter->pdev->dev,
- "FW does not support nic partion\n");
- return adapter->fw_hal_version;
- }
/* Find PCI function number */
pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
@@ -562,29 +581,35 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
adapter->ahw.pci_func = func;
+ if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
+ adapter->capabilities = nic_info.capabilities;
+
+ if (adapter->capabilities & BIT_6)
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ else
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ }
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ adapter->nic_ops = &qlcnic_ops;
+ return adapter->fw_hal_version;
+ }
+
/* Determine function privilege level */
priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
op_mode = readl(priv_op);
- if (op_mode == QLC_DEV_DRV_DEFAULT) {
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
- if (qlcnic_api_lock(adapter))
- return 0;
- op_mode = (op_mode & ~QLC_DEV_SET_DRV(0xf, func)) |
- (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, func));
- writel(op_mode, priv_op);
- qlcnic_api_unlock(adapter);
-
- } else
+ else
priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
switch (priv_level) {
case QLCNIC_MGMT_FUNC:
adapter->op_mode = QLCNIC_MGMT_FUNC;
- adapter->nic_ops = &qlcnic_pf_ops;
+ adapter->nic_ops = &qlcnic_ops;
+ qlcnic_init_pci_info(adapter);
/* Set privilege level for other functions */
- if (qlcnic_config_npars)
- qlcnic_set_function_modes(adapter);
- qlcnic_dev_set_npar_ready(adapter);
+ qlcnic_set_function_modes(adapter);
dev_info(&adapter->pdev->dev,
"HAL Version: %d, Management function\n",
adapter->fw_hal_version);
@@ -594,7 +619,7 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
dev_info(&adapter->pdev->dev,
"HAL Version: %d, Privileged function\n",
adapter->fw_hal_version);
- adapter->nic_ops = &qlcnic_pf_ops;
+ adapter->nic_ops = &qlcnic_ops;
break;
case QLCNIC_NON_PRIV_FUNC:
adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
@@ -672,7 +697,7 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
}
if (!found)
- name = "Unknown";
+ sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
}
static void
@@ -684,7 +709,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
int i, offset, val;
int *ptr32;
struct pci_dev *pdev = adapter->pdev;
-
+ struct qlcnic_info nic_info;
adapter->driver_mismatch = 0;
ptr32 = (int *)&serial_num;
@@ -716,11 +741,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
- if (adapter->fw_hal_version == QLCNIC_FW_NPAR)
- qlcnic_get_nic_info(adapter, adapter->ahw.pci_func);
- else
- adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
-
adapter->flags &= ~QLCNIC_LRO_ENABLED;
if (adapter->ahw.port_type == QLCNIC_XGBE) {
@@ -731,6 +751,16 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
}
+ if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
+ adapter->physical_port = nic_info.phys_port;
+ adapter->switch_mode = nic_info.switch_mode;
+ adapter->max_tx_ques = nic_info.max_tx_ques;
+ adapter->max_rx_ques = nic_info.max_rx_ques;
+ adapter->capabilities = nic_info.capabilities;
+ adapter->max_mac_filters = nic_info.max_mac_filters;
+ adapter->max_mtu = nic_info.max_mtu;
+ }
+
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
@@ -757,8 +787,12 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
if (load_fw_file)
qlcnic_request_firmware(adapter);
- else
+ else {
+ if (qlcnic_check_flash_fw_ver(adapter))
+ goto err_out;
+
adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
+ }
err = qlcnic_need_fw_reset(adapter);
if (err < 0)
@@ -768,6 +802,7 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
if (first_boot != 0x55555555) {
QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
qlcnic_pinit_from_rom(adapter);
msleep(1);
}
@@ -790,20 +825,18 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
wait_init:
/* Handshake with the card before we register the devices. */
- err = qlcnic_phantom_init(adapter);
+ err = qlcnic_init_firmware(adapter);
if (err)
goto err_out;
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
qlcnic_idc_debug_info(adapter, 1);
- qlcnic_dev_set_npar_ready(adapter);
-
qlcnic_check_options(adapter);
- if (adapter->fw_hal_version != QLCNIC_FW_BASE &&
- adapter->op_mode == QLCNIC_MGMT_FUNC)
- qlcnic_set_mgmt_driver(adapter);
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED &&
+ adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
+ qlcnic_dev_set_npar_ready(adapter);
adapter->need_fw_reset = 0;
@@ -888,9 +921,23 @@ qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
+ int ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return -EIO;
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return 0;
+
+ if (qlcnic_fw_create_ctx(adapter))
+ return -EIO;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
qlcnic_set_multi(netdev);
qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
@@ -908,6 +955,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_linkevent_request(adapter, 1);
+ adapter->reset_context = 0;
set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
}
@@ -947,6 +995,9 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_napi_disable(adapter);
+ qlcnic_fw_destroy_ctx(adapter);
+
+ qlcnic_reset_rx_buffers_list(adapter);
qlcnic_release_tx_buffers(adapter);
spin_unlock(&adapter->tx_clean_lock);
}
@@ -968,16 +1019,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- int err, ring;
- struct qlcnic_host_rds_ring *rds_ring;
+ int err;
if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
return 0;
- err = qlcnic_init_firmware(adapter);
- if (err)
- return err;
-
err = qlcnic_napi_add(adapter, netdev);
if (err)
return err;
@@ -985,7 +1031,7 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
err = qlcnic_alloc_sw_resources(adapter);
if (err) {
dev_err(&pdev->dev, "Error in setting sw resources\n");
- return err;
+ goto err_out_napi_del;
}
err = qlcnic_alloc_hw_resources(adapter);
@@ -994,16 +1040,10 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
goto err_out_free_sw;
}
-
- for (ring = 0; ring < adapter->max_rds_rings; ring++) {
- rds_ring = &adapter->recv_ctx.rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, ring, rds_ring);
- }
-
err = qlcnic_request_irq(adapter);
if (err) {
dev_err(&pdev->dev, "failed to setup interrupt\n");
- goto err_out_free_rxbuf;
+ goto err_out_free_hw;
}
qlcnic_init_coalesce_defaults(adapter);
@@ -1013,11 +1053,12 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
return 0;
-err_out_free_rxbuf:
- qlcnic_release_rx_buffers(adapter);
+err_out_free_hw:
qlcnic_free_hw_resources(adapter);
err_out_free_sw:
qlcnic_free_sw_resources(adapter);
+err_out_napi_del:
+ qlcnic_napi_del(adapter);
return err;
}
@@ -1052,6 +1093,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
}
}
+ qlcnic_fw_destroy_ctx(adapter);
+
qlcnic_detach(adapter);
adapter->diag_test = 0;
@@ -1070,6 +1113,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
int ring;
int ret;
@@ -1089,6 +1133,17 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
return ret;
}
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx.sds_rings[ring];
@@ -1100,6 +1155,27 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
return 0;
}
+/* Reset context in hardware only */
+static int
+qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
@@ -1143,7 +1219,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
adapter->max_mc_count = 38;
netdev->netdev_ops = &qlcnic_netdev_ops;
- netdev->watchdog_timeo = 2*HZ;
+ netdev->watchdog_timeo = 5*HZ;
qlcnic_change_mtu(netdev, netdev->mtu);
@@ -1168,8 +1244,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->irq = adapter->msix_entries[0].vector;
- INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
-
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -1340,8 +1414,6 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
- cancel_work_sync(&adapter->tx_timeout_task);
-
qlcnic_detach(adapter);
if (adapter->npars != NULL)
@@ -1380,10 +1452,6 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
- cancel_work_sync(&adapter->tx_timeout_task);
-
- qlcnic_detach(adapter);
-
qlcnic_clr_all_drv_state(adapter);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1444,28 +1512,16 @@ qlcnic_resume(struct pci_dev *pdev)
}
if (netif_running(netdev)) {
- err = qlcnic_attach(adapter);
- if (err)
- goto err_out;
-
err = qlcnic_up(adapter, netdev);
if (err)
- goto err_out_detach;
-
+ goto done;
qlcnic_config_indev_addr(netdev, NETDEV_UP);
}
-
+done:
netif_device_attach(netdev);
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
return 0;
-
-err_out_detach:
- qlcnic_detach(adapter);
-err_out:
- qlcnic_clr_all_drv_state(adapter);
- netif_device_attach(netdev);
- return err;
}
#endif
@@ -1715,10 +1771,15 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
- if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+ if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
- adapter->stats.xmit_off++;
- return NETDEV_TX_BUSY;
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_start_queue(netdev);
+ else {
+ adapter->stats.xmit_off++;
+ return NETDEV_TX_BUSY;
+ }
}
producer = tx_ring->producer;
@@ -1853,35 +1914,11 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
return;
dev_err(&netdev->dev, "transmit timeout, resetting.\n");
- schedule_work(&adapter->tx_timeout_task);
-}
-
-static void qlcnic_tx_timeout_task(struct work_struct *work)
-{
- struct qlcnic_adapter *adapter =
- container_of(work, struct qlcnic_adapter, tx_timeout_task);
-
- if (!netif_running(adapter->netdev))
- return;
-
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- return;
if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
- goto request_reset;
-
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- if (!qlcnic_reset_context(adapter)) {
- adapter->netdev->trans_start = jiffies;
- return;
-
- /* context reset failed, fall through for fw reset */
- }
-
-request_reset:
- adapter->need_fw_reset = 1;
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- QLCDB(adapter, DRV, "Resetting adapter\n");
+ adapter->need_fw_reset = 1;
+ else
+ adapter->reset_context = 1;
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -2024,14 +2061,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
- __netif_tx_lock(tx_ring->txq, smp_processor_id());
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev);
- adapter->tx_timeo_cnt = 0;
adapter->stats.xmit_on++;
}
- __netif_tx_unlock(tx_ring->txq);
}
+ adapter->tx_timeo_cnt = 0;
}
/*
* If everything is freed up to consumer then check if the ring is full
@@ -2076,6 +2111,25 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring =
+ container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ int work_done;
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
@@ -2394,10 +2448,6 @@ qlcnic_detach_work(struct work_struct *work)
qlcnic_down(adapter, netdev);
- rtnl_lock();
- qlcnic_detach(adapter);
- rtnl_unlock();
-
status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (status & QLCNIC_RCODE_FATAL_ERROR)
@@ -2449,10 +2499,6 @@ qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
{
u32 state;
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC ||
- adapter->fw_hal_version == QLCNIC_FW_BASE)
- return;
-
if (qlcnic_api_lock(adapter))
return;
@@ -2490,18 +2536,10 @@ qlcnic_attach_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
- int err;
if (netif_running(netdev)) {
- err = qlcnic_attach(adapter);
- if (err)
- goto done;
-
- err = qlcnic_up(adapter, netdev);
- if (err) {
- qlcnic_detach(adapter);
+ if (qlcnic_up(adapter, netdev))
goto done;
- }
qlcnic_config_indev_addr(netdev, NETDEV_UP);
}
@@ -2538,6 +2576,12 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
adapter->fw_fail_cnt = 0;
if (adapter->need_fw_reset)
goto detach;
+
+ if (adapter->reset_context) {
+ qlcnic_reset_hw_context(adapter);
+ adapter->netdev->trans_start = jiffies;
+ }
+
return 0;
}
@@ -2632,7 +2676,7 @@ qlcnic_store_bridged_mode(struct device *dev,
if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
goto err_out;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto err_out;
if (strict_strtoul(buf, 2, &new))
@@ -2841,6 +2885,364 @@ static struct bin_attribute bin_attr_mem = {
.write = qlcnic_sysfs_write_mem,
};
+int
+validate_pm_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+
+ u8 src_pci_func, s_esw_id, d_esw_id;
+ u8 dest_pci_func;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ src_pci_func = pm_cfg[i].pci_func;
+ dest_pci_func = pm_cfg[i].dest_npar;
+ if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
+ || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (!IS_VALID_MODE(pm_cfg[i].action))
+ return QL_STATUS_INVALID_PARAM;
+
+ s_esw_id = adapter->npars[src_pci_func].phy_port;
+ d_esw_id = adapter->npars[dest_pci_func].phy_port;
+
+ if (s_esw_id != d_esw_id)
+ return QL_STATUS_INVALID_PARAM;
+
+ }
+ return 0;
+
+}
+
+static ssize_t
+qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ u32 id, action, pci_func;
+ int count, rem, i, ret;
+
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ rem = size % sizeof(struct qlcnic_pm_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
+
+ ret = validate_pm_config(adapter, pm_cfg, count);
+ if (ret)
+ return ret;
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ action = pm_cfg[i].action;
+ id = adapter->npars[pci_func].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id,
+ action, pci_func);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ id = adapter->npars[pci_func].phy_port;
+ adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
+ adapter->npars[pci_func].dest_npar = id;
+ }
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
+ int i;
+
+ if (size != sizeof(pm_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ pm_cfg[i].action = adapter->npars[i].enable_pm;
+ pm_cfg[i].dest_npar = 0;
+ pm_cfg[i].pci_func = i;
+ }
+ memcpy(buf, &pm_cfg, size);
+
+ return size;
+}
+
+int
+validate_esw_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+ u8 pci_func;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (esw_cfg->host_vlan_tag == 1)
+ if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
+ || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
+ || !IS_VALID_MODE(esw_cfg[i].mac_learning)
+ || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
+ return QL_STATUS_INVALID_PARAM;
+ }
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ u8 id, discard_tagged, promsc_mode, mac_learn;
+ u8 vlan_tagging, pci_func, vlan_id;
+ int count, rem, i, ret;
+
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ rem = size % sizeof(struct qlcnic_esw_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
+ ret = validate_esw_config(adapter, esw_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ id = adapter->npars[pci_func].phy_port;
+ vlan_tagging = esw_cfg[i].host_vlan_tag;
+ promsc_mode = esw_cfg[i].promisc_mode;
+ mac_learn = esw_cfg[i].mac_learning;
+ vlan_id = esw_cfg[i].vlan_id;
+ discard_tagged = esw_cfg[i].discard_tagged;
+ ret = qlcnic_config_switch_port(adapter, id, vlan_tagging,
+ discard_tagged,
+ promsc_mode,
+ mac_learn,
+ pci_func,
+ vlan_id);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
+ adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
+ adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
+ adapter->npars[pci_func].discard_tagged =
+ esw_cfg[i].discard_tagged;
+ adapter->npars[pci_func].host_vlan_tag =
+ esw_cfg[i].host_vlan_tag;
+ }
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+ int i;
+
+ if (size != sizeof(esw_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+
+ esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
+ esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
+ esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
+ esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
+ esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
+ }
+ memcpy(buf, &esw_cfg, size);
+
+ return size;
+}
+
+int
+validate_npar_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_func_cfg *np_cfg, int count)
+{
+ u8 pci_func, i;
+
+ for (i = 0; i < count; i++) {
+ pci_func = np_cfg[i].pci_func;
+ if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (!IS_VALID_BW(np_cfg[i].min_bw)
+ || !IS_VALID_BW(np_cfg[i].max_bw)
+ || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
+ || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
+ return QL_STATUS_INVALID_PARAM;
+ }
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_info nic_info;
+ struct qlcnic_npar_func_cfg *np_cfg;
+ int i, count, rem, ret;
+ u8 pci_func;
+
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ rem = size % sizeof(struct qlcnic_npar_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ np_cfg = (struct qlcnic_npar_func_cfg *) buf;
+ ret = validate_npar_config(adapter, np_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count ; i++) {
+ pci_func = np_cfg[i].pci_func;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+ nic_info.pci_func = pci_func;
+ nic_info.min_tx_bw = np_cfg[i].min_bw;
+ nic_info.max_tx_bw = np_cfg[i].max_bw;
+ ret = qlcnic_set_nic_info(adapter, &nic_info);
+ if (ret)
+ return ret;
+ }
+
+ return size;
+
+}
+static ssize_t
+qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_info nic_info;
+ struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+ int i, ret;
+
+ if (size != sizeof(np_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, i);
+ if (ret)
+ return ret;
+
+ np_cfg[i].pci_func = i;
+ np_cfg[i].op_mode = nic_info.op_mode;
+ np_cfg[i].port_num = nic_info.phys_port;
+ np_cfg[i].fw_capab = nic_info.capabilities;
+ np_cfg[i].min_bw = nic_info.min_tx_bw;
+ np_cfg[i].max_bw = nic_info.max_tx_bw;
+ np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
+ np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+ }
+ memcpy(buf, &np_cfg, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+ int i, ret;
+
+ if (size != sizeof(pci_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
+ pci_cfg[i].pci_func = pci_info[i].id;
+ pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].port_num = pci_info[i].default_port;
+ pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+ pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+ memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+ }
+ memcpy(buf, &pci_cfg, size);
+ return size;
+
+}
+static struct bin_attribute bin_attr_npar_config = {
+ .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_npar_config,
+ .write = qlcnic_sysfs_write_npar_config,
+};
+
+static struct bin_attribute bin_attr_pci_config = {
+ .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_pci_config,
+ .write = NULL,
+};
+
+static struct bin_attribute bin_attr_esw_config = {
+ .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_esw_config,
+ .write = qlcnic_sysfs_write_esw_config,
+};
+
+static struct bin_attribute bin_attr_pm_config = {
+ .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_pm_config,
+ .write = qlcnic_sysfs_write_pm_config,
+};
+
static void
qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
@@ -2872,6 +3274,18 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ if (device_create_bin_file(dev, &bin_attr_pci_config))
+ dev_info(dev, "failed to create pci config sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_npar_config))
+ dev_info(dev, "failed to create npar config sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_esw_config))
+ dev_info(dev, "failed to create esw config sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_pm_config))
+ dev_info(dev, "failed to create pm config sysfs entry\n");
+
}
@@ -2883,6 +3297,13 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ device_remove_bin_file(dev, &bin_attr_pci_config);
+ device_remove_bin_file(dev, &bin_attr_npar_config);
+ device_remove_bin_file(dev, &bin_attr_esw_config);
+ device_remove_bin_file(dev, &bin_attr_pm_config);
}
#ifdef CONFIG_INET
@@ -2940,7 +3361,7 @@ recheck:
if (!adapter)
goto done;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto done;
qlcnic_config_indev_addr(dev, event);
@@ -2976,7 +3397,7 @@ recheck:
if (!adapter)
goto done;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto done;
switch (event) {
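The qlcnic hunks above drop the tx_timeout worker and the locked wake in qlcnic_process_cmd_ring() in favour of the standard lock-free stop/wake handshake on the TX ring: the transmit path stops the queue when free descriptors fall to TX_STOP_THRESH, issues a memory barrier and re-checks, so a completion that raced with the stop cannot leave the queue stalled, and the completion path does the mirror-image check before waking. A minimal sketch of that handshake, restating the logic shown above (all identifiers, qlcnic_tx_avail(), TX_STOP_THRESH and the netif_* queue helpers, appear in the diff; descriptor bookkeeping is omitted):

	/* Transmit side: stop the queue, then re-check after a barrier so a
	 * completion that freed descriptors in the meantime restarts it. */
	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		smp_mb();
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
			netif_start_queue(netdev);
		else
			return NETDEV_TX_BUSY;
	}

	/* Completion side: after freeing descriptors, wake only if the queue
	 * is still stopped, the link is up and enough space is free again. */
	smp_mb();
	if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
	    qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
		netif_wake_queue(netdev);

Because the re-check pairs with the barrier on the completion side, the completion path no longer needs to take the TX queue lock just to wake the queue, which is presumably why the __netif_tx_lock()/__netif_tx_unlock() pair is removed above.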
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 668327c..22371f1 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -38,7 +38,7 @@
* Tx descriptors that can be associated with each corresponding FIFO.
* intr_type: This defines the type of interrupt. The values can be 0(INTA),
* 2(MSI_X). Default value is '2(MSI_X)'
- * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
+ * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
* Possible values '1' for enable '0' for disable. Default is '0'
* lro_max_pkts: This parameter defines maximum number of packets can be
* aggregated as a single large packet
@@ -90,7 +90,7 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.26.25"
+#define DRV_VERSION "2.0.26.26"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -496,7 +496,7 @@ S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
-static unsigned int lro_enable;
+static unsigned int lro_enable = 1;
module_param_named(lro, lro_enable, uint, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
@@ -795,7 +795,6 @@ static int init_shared_mem(struct s2io_nic *nic)
ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
ring->nic = nic;
ring->ring_no = i;
- ring->lro = lro_enable;
blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
/* Allocating all the Rx blocks */
@@ -6675,6 +6674,7 @@ static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
{
return (dev->features & NETIF_F_TSO) != 0;
}
+
static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
{
if (data)
@@ -6685,6 +6685,42 @@ static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
return 0;
}
+static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ int rc = 0;
+ int changed = 0;
+
+ if (data & ~ETH_FLAG_LRO)
+ return -EINVAL;
+
+ if (data & ETH_FLAG_LRO) {
+ if (lro_enable) {
+ if (!(dev->features & NETIF_F_LRO)) {
+ dev->features |= NETIF_F_LRO;
+ changed = 1;
+ }
+ } else
+ rc = -EINVAL;
+ } else if (dev->features & NETIF_F_LRO) {
+ dev->features &= ~NETIF_F_LRO;
+ changed = 1;
+ }
+
+ if (changed && netif_running(dev)) {
+ s2io_stop_all_tx_queue(sp);
+ s2io_card_down(sp);
+ sp->lro = !!(dev->features & NETIF_F_LRO);
+ rc = s2io_card_up(sp);
+ if (rc)
+ s2io_reset(sp);
+ else
+ s2io_start_all_tx_queue(sp);
+ }
+
+ return rc;
+}
+
static const struct ethtool_ops netdev_ethtool_ops = {
.get_settings = s2io_ethtool_gset,
.set_settings = s2io_ethtool_sset,
@@ -6701,6 +6737,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_rx_csum = s2io_ethtool_get_rx_csum,
.set_rx_csum = s2io_ethtool_set_rx_csum,
.set_tx_csum = s2io_ethtool_op_set_tx_csum,
+ .set_flags = s2io_ethtool_set_flags,
+ .get_flags = ethtool_op_get_flags,
.set_sg = ethtool_op_set_sg,
.get_tso = s2io_ethtool_op_get_tso,
.set_tso = s2io_ethtool_op_set_tso,
@@ -7229,6 +7267,7 @@ static int s2io_card_up(struct s2io_nic *sp)
struct ring_info *ring = &mac_control->rings[i];
ring->mtu = dev->mtu;
+ ring->lro = sp->lro;
ret = fill_rx_buffers(sp, ring, 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
@@ -7974,7 +8013,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->netdev_ops = &s2io_netdev_ops;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-
+ if (lro_enable)
+ dev->features |= NETIF_F_LRO;
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
if (sp->high_dma_flag == true)
dev->features |= NETIF_F_HIGHDMA;
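In s2io the change enables LRO by default (lro_enable is now 1 and NETIF_F_LRO is set at probe) and adds a .set_flags ethtool hook so the feature can be toggled at runtime; it can still be disabled at load time through the existing lro module parameter. A reduced sketch of such a handler that honours only the LRO flag (example_set_flags is an illustrative name; the driver's s2io_ethtool_set_flags() above additionally refuses to enable LRO when the module parameter disabled it, and bounces the card so the RX rings pick up the new setting):

	static int example_set_flags(struct net_device *dev, u32 data)
	{
		/* Only the LRO bit is negotiable; any other flag is rejected. */
		if (data & ~ETH_FLAG_LRO)
			return -EINVAL;

		if (data & ETH_FLAG_LRO)
			dev->features |= NETIF_F_LRO;
		else
			dev->features &= ~NETIF_F_LRO;

		return 0;
	}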
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 8ad476a..35b3f29 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -189,6 +189,13 @@ module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
/**************************************************************************
*
* Utility functions and prototypes
@@ -272,16 +279,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
int spent;
- EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
- channel->channel, raw_smp_processor_id());
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
spent = efx_process_channel(channel, budget);
if (spent < budget) {
- struct efx_nic *efx = channel->efx;
-
if (channel->channel < efx->n_rx_channels &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
@@ -357,7 +364,8 @@ void efx_process_channel_now(struct efx_channel *channel)
*/
static int efx_probe_eventq(struct efx_channel *channel)
{
- EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
return efx_nic_probe_eventq(channel);
}
@@ -365,7 +373,8 @@ static int efx_probe_eventq(struct efx_channel *channel)
/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
- EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
channel->eventq_read_ptr = 0;
@@ -374,14 +383,16 @@ static void efx_init_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel)
{
- EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
}
static void efx_remove_eventq(struct efx_channel *channel)
{
- EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
efx_nic_remove_eventq(channel);
}
@@ -398,7 +409,8 @@ static int efx_probe_channel(struct efx_channel *channel)
struct efx_rx_queue *rx_queue;
int rc;
- EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
rc = efx_probe_eventq(channel);
if (rc)
@@ -468,13 +480,15 @@ static void efx_init_channels(struct efx_nic *efx)
*/
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_hash_size +
efx->type->rx_buffer_padding);
efx->rx_buffer_order = get_order(efx->rx_buffer_len +
sizeof(struct efx_rx_page_state));
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
- EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "init chan %d\n", channel->channel);
efx_init_eventq(channel);
@@ -501,7 +515,8 @@ static void efx_start_channel(struct efx_channel *channel)
{
struct efx_rx_queue *rx_queue;
- EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "starting chan %d\n", channel->channel);
/* The interrupt handler for this channel may set work_pending
* as soon as we enable it. Make sure it's cleared before
@@ -526,7 +541,8 @@ static void efx_stop_channel(struct efx_channel *channel)
if (!channel->enabled)
return;
- EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
+ netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
+ "stop chan %d\n", channel->channel);
channel->enabled = false;
napi_disable(&channel->napi_str);
@@ -548,16 +564,19 @@ static void efx_fini_channels(struct efx_nic *efx)
* descriptor caches reference memory we're about to free,
* but falcon_reconfigure_mac_wrapper() won't reconnect
* the MACs because of the pending reset. */
- EFX_ERR(efx, "Resetting to recover from flush failure\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
efx_schedule_reset(efx, RESET_TYPE_ALL);
} else if (rc) {
- EFX_ERR(efx, "failed to flush queues\n");
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
} else {
- EFX_LOG(efx, "successfully flushed all queues\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
}
efx_for_each_channel(channel, efx) {
- EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "shut down chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
@@ -572,7 +591,8 @@ static void efx_remove_channel(struct efx_channel *channel)
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
@@ -623,12 +643,13 @@ void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */
if (link_state->up) {
- EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
- link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu,
- (efx->promiscuous ? " [PROMISC]" : ""));
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu,
+ (efx->promiscuous ? " [PROMISC]" : ""));
} else {
- EFX_INFO(efx, "link down\n");
+ netif_info(efx, link, efx->net_dev, "link down\n");
}
}
@@ -732,7 +753,7 @@ static int efx_probe_port(struct efx_nic *efx)
{
int rc;
- EFX_LOG(efx, "create port\n");
+ netif_dbg(efx, probe, efx->net_dev, "create port\n");
if (phy_flash_cfg)
efx->phy_mode = PHY_MODE_SPECIAL;
@@ -746,15 +767,16 @@ static int efx_probe_port(struct efx_nic *efx)
if (is_valid_ether_addr(efx->mac_address)) {
memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
} else {
- EFX_ERR(efx, "invalid MAC address %pM\n",
- efx->mac_address);
+ netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
+ efx->mac_address);
if (!allow_bad_hwaddr) {
rc = -EINVAL;
goto err;
}
random_ether_addr(efx->net_dev->dev_addr);
- EFX_INFO(efx, "using locally-generated MAC %pM\n",
- efx->net_dev->dev_addr);
+ netif_info(efx, probe, efx->net_dev,
+ "using locally-generated MAC %pM\n",
+ efx->net_dev->dev_addr);
}
return 0;
@@ -768,7 +790,7 @@ static int efx_init_port(struct efx_nic *efx)
{
int rc;
- EFX_LOG(efx, "init port\n");
+ netif_dbg(efx, drv, efx->net_dev, "init port\n");
mutex_lock(&efx->mac_lock);
@@ -799,7 +821,7 @@ fail1:
static void efx_start_port(struct efx_nic *efx)
{
- EFX_LOG(efx, "start port\n");
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
BUG_ON(efx->port_enabled);
mutex_lock(&efx->mac_lock);
@@ -816,7 +838,7 @@ static void efx_start_port(struct efx_nic *efx)
/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
- EFX_LOG(efx, "stop port\n");
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
@@ -831,7 +853,7 @@ static void efx_stop_port(struct efx_nic *efx)
static void efx_fini_port(struct efx_nic *efx)
{
- EFX_LOG(efx, "shut down port\n");
+ netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
if (!efx->port_initialized)
return;
@@ -845,7 +867,7 @@ static void efx_fini_port(struct efx_nic *efx)
static void efx_remove_port(struct efx_nic *efx)
{
- EFX_LOG(efx, "destroying port\n");
+ netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
efx->type->remove_port(efx);
}
@@ -863,11 +885,12 @@ static int efx_init_io(struct efx_nic *efx)
dma_addr_t dma_mask = efx->type->max_dma_mask;
int rc;
- EFX_LOG(efx, "initialising I/O\n");
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
rc = pci_enable_device(pci_dev);
if (rc) {
- EFX_ERR(efx, "failed to enable PCI device\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
goto fail1;
}
@@ -885,39 +908,45 @@ static int efx_init_io(struct efx_nic *efx)
dma_mask >>= 1;
}
if (rc) {
- EFX_ERR(efx, "could not find a suitable DMA mask\n");
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
goto fail2;
}
- EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long) dma_mask);
rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
if (rc) {
/* pci_set_consistent_dma_mask() is not *allowed* to
* fail with a mask that pci_set_dma_mask() accepted,
* but just in case...
*/
- EFX_ERR(efx, "failed to set consistent DMA mask\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to set consistent DMA mask\n");
goto fail2;
}
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
if (rc) {
- EFX_ERR(efx, "request for memory BAR failed\n");
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
rc = -EIO;
goto fail3;
}
efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size);
if (!efx->membase) {
- EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size);
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size);
rc = -ENOMEM;
goto fail4;
}
- EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size, efx->membase);
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size, efx->membase);
return 0;
@@ -933,7 +962,7 @@ static int efx_init_io(struct efx_nic *efx)
static void efx_fini_io(struct efx_nic *efx)
{
- EFX_LOG(efx, "shutting down I/O\n");
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
@@ -997,9 +1026,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
xentries[i].entry = i;
rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
- EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
- " available (%d < %d).\n", rc, n_channels);
- EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %d).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
@@ -1023,7 +1054,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
} else {
/* Fall back to single channel MSI */
efx->interrupt_mode = EFX_INT_MODE_MSI;
- EFX_ERR(efx, "could not enable MSI-X\n");
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
}
}
@@ -1036,7 +1068,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
} else {
- EFX_ERR(efx, "could not enable MSI\n");
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
efx->interrupt_mode = EFX_INT_MODE_LEGACY;
}
}
@@ -1088,9 +1121,10 @@ static void efx_set_channels(struct efx_nic *efx)
static int efx_probe_nic(struct efx_nic *efx)
{
+ size_t i;
int rc;
- EFX_LOG(efx, "creating NIC\n");
+ netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
/* Carry out hardware-type specific initialisation */
rc = efx->type->probe(efx);
@@ -1101,6 +1135,11 @@ static int efx_probe_nic(struct efx_nic *efx)
* in MSI-X interrupts. */
efx_probe_interrupts(efx);
+ if (efx->n_channels > 1)
+ get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] = i % efx->n_rx_channels;
+
efx_set_channels(efx);
efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
@@ -1112,7 +1151,7 @@ static int efx_probe_nic(struct efx_nic *efx)
static void efx_remove_nic(struct efx_nic *efx)
{
- EFX_LOG(efx, "destroying NIC\n");
+ netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
efx_remove_interrupts(efx);
efx->type->remove(efx);
@@ -1132,14 +1171,14 @@ static int efx_probe_all(struct efx_nic *efx)
/* Create NIC */
rc = efx_probe_nic(efx);
if (rc) {
- EFX_ERR(efx, "failed to create NIC\n");
+ netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
goto fail1;
}
/* Create port */
rc = efx_probe_port(efx);
if (rc) {
- EFX_ERR(efx, "failed to create port\n");
+ netif_err(efx, probe, efx->net_dev, "failed to create port\n");
goto fail2;
}
@@ -1147,8 +1186,9 @@ static int efx_probe_all(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
rc = efx_probe_channel(channel);
if (rc) {
- EFX_ERR(efx, "failed to create channel %d\n",
- channel->channel);
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
goto fail3;
}
}
@@ -1344,8 +1384,9 @@ static void efx_monitor(struct work_struct *data)
struct efx_nic *efx = container_of(data, struct efx_nic,
monitor_work.work);
- EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
- raw_smp_processor_id());
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
BUG_ON(efx->type->monitor == NULL);
/* If the mac_lock is already held then it is likely a port
@@ -1452,8 +1493,8 @@ static int efx_net_open(struct net_device *net_dev)
struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
- EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
- raw_smp_processor_id());
+ netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
if (efx->state == STATE_DISABLED)
return -EIO;
@@ -1478,8 +1519,8 @@ static int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
- raw_smp_processor_id());
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
if (efx->state != STATE_DISABLED) {
/* Stop the device and flush all the channels */
@@ -1532,8 +1573,9 @@ static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
- efx->port_enabled);
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
@@ -1552,7 +1594,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
efx_stop_all(efx);
- EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_fini_channels(efx);
@@ -1578,8 +1620,9 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) {
- EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
- new_addr);
+ netif_err(efx, drv, efx->net_dev,
+ "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
return -EINVAL;
}
@@ -1682,7 +1725,6 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
- SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
/* Clear MAC statistics */
@@ -1707,7 +1749,8 @@ static int efx_register_netdev(struct efx_nic *efx)
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
if (rc) {
- EFX_ERR(efx, "failed to init net dev attributes\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
goto fail_registered;
}
@@ -1715,7 +1758,7 @@ static int efx_register_netdev(struct efx_nic *efx)
fail_locked:
rtnl_unlock();
- EFX_ERR(efx, "could not register net dev\n");
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc;
fail_registered:
@@ -1780,7 +1823,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
rc = efx->type->init(efx);
if (rc) {
- EFX_ERR(efx, "failed to initialise NIC\n");
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
goto fail;
}
@@ -1792,7 +1835,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (rc)
goto fail;
if (efx->phy_op->reconfigure(efx))
- EFX_ERR(efx, "could not restore PHY settings\n");
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
}
efx->mac_op->reconfigure(efx);
@@ -1825,13 +1869,14 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
int rc, rc2;
bool disabled;
- EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
- EFX_ERR(efx, "failed to reset hardware\n");
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
goto out;
}
@@ -1856,10 +1901,10 @@ out:
if (disabled) {
dev_close(efx->net_dev);
- EFX_ERR(efx, "has been disabled\n");
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
- EFX_LOG(efx, "reset complete\n");
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
}
return rc;
}
@@ -1877,7 +1922,8 @@ static void efx_reset_work(struct work_struct *data)
/* If we're not RUNNING then don't reset. Leave the reset_pending
* flag set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
- EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
+ netif_info(efx, drv, efx->net_dev,
+ "scheduled reset quenched. NIC not RUNNING\n");
return;
}
@@ -1891,7 +1937,8 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
enum reset_type method;
if (efx->reset_pending != RESET_TYPE_NONE) {
- EFX_INFO(efx, "quenching already scheduled reset\n");
+ netif_info(efx, drv, efx->net_dev,
+ "quenching already scheduled reset\n");
return;
}
@@ -1915,10 +1962,12 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
}
if (method != type)
- EFX_LOG(efx, "scheduling %s reset for %s\n",
- RESET_TYPE(method), RESET_TYPE(type));
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
else
- EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
efx->reset_pending = method;
@@ -2005,6 +2054,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
efx->state = STATE_INIT;
efx->reset_pending = RESET_TYPE_NONE;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
@@ -2124,7 +2174,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
efx_fini_io(efx);
- EFX_LOG(efx, "shutdown successful\n");
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
pci_set_drvdata(pci_dev, NULL);
efx_fini_struct(efx);
@@ -2149,13 +2199,15 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx->type->init(efx);
if (rc) {
- EFX_ERR(efx, "failed to initialise NIC\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise NIC\n");
goto fail3;
}
rc = efx_init_port(efx);
if (rc) {
- EFX_ERR(efx, "failed to initialise port\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise port\n");
goto fail4;
}
@@ -2211,11 +2263,13 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
NETIF_F_HIGHDMA | NETIF_F_TSO);
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, type, pci_dev, net_dev);
if (rc)
goto fail1;
- EFX_INFO(efx, "Solarflare Communications NIC detected\n");
+ netif_info(efx, probe, efx->net_dev,
+ "Solarflare Communications NIC detected\n");
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -2253,7 +2307,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
}
if (rc) {
- EFX_ERR(efx, "Could not reset NIC\n");
+ netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
goto fail4;
}
@@ -2265,7 +2319,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail5;
- EFX_LOG(efx, "initialisation successful\n");
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
rtnl_lock();
efx_mtd_probe(efx); /* allowed to fail */
@@ -2281,7 +2335,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
efx_fini_struct(efx);
fail1:
WARN_ON(rc > 0);
- EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
+ netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
}
@@ -2385,7 +2439,7 @@ static struct dev_pm_ops efx_pm_ops = {
};
static struct pci_driver efx_pci_driver = {
- .name = EFX_DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
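The sfc/efx.c changes convert the driver-private EFX_LOG/EFX_INFO/EFX_ERR/EFX_TRACE macros to the generic netif_dbg()/netif_info()/netif_err()/netif_vdbg() helpers. Those helpers print only when the NETIF_MSG_<TYPE> bit named by their second argument is set in the device's msg_enable bitmap, which is now seeded from the new debug module parameter at init time and can be adjusted through the get_msglevel/set_msglevel ethtool operations added below. For illustration, using values and calls that appear in the diff:

	/* msg_enable is a bitmap of NETIF_MSG_* values; each netif_*() call
	 * names the bit that gates it (netif_dbg() is additionally subject to
	 * the kernel's DEBUG / dynamic-debug configuration). */
	efx->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");         /* NETIF_MSG_PROBE */
	netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); /* NETIF_MSG_DRV   */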
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index e1e4488..060dc95 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -106,8 +106,9 @@ extern unsigned int efx_monitor_interval;
static inline void efx_schedule_channel(struct efx_channel *channel)
{
- EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
- channel->channel, raw_smp_processor_id());
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d scheduling NAPI poll on CPU%d\n",
+ channel->channel, raw_smp_processor_id());
channel->work_pending = true;
napi_schedule(&channel->napi_str);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 22026bf..3b8b0a0 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -218,8 +218,8 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
/* GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
- EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
- " setting\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "rejecting unsupported 1000Mbps HD setting\n");
return -EINVAL;
}
@@ -234,7 +234,7 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
siena_print_fwver(efx, info->fw_version,
@@ -242,6 +242,32 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+ return efx_nic_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ regs->version = efx->type->revision;
+ efx_nic_get_regs(efx, buf);
+}
+
+static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->msg_enable;
+}
+
+static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ efx->msg_enable = msg_enable;
+}
+
/**
* efx_fill_test - fill in an individual self-test entry
* @test_index: Index of the test
@@ -520,6 +546,14 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
return efx->rx_checksum_enabled;
}
+static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH;
+
+ return ethtool_op_set_flags(net_dev, data, supported);
+}
+
static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
@@ -539,7 +573,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (!already_up) {
rc = dev_open(efx->net_dev);
if (rc) {
- EFX_ERR(efx, "failed opening device.\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed opening device.\n");
goto fail2;
}
}
@@ -551,9 +586,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (!already_up)
dev_close(efx->net_dev);
- EFX_LOG(efx, "%s %sline self-tests\n",
- rc == 0 ? "passed" : "failed",
- (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+ netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
fail2:
fail1:
@@ -679,8 +714,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
return -EOPNOTSUPP;
if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
- EFX_ERR(efx, "invalid coalescing setting. "
- "Only rx/tx_coalesce_usecs_irq are supported\n");
+ netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. "
+ "Only rx/tx_coalesce_usecs_irq are supported\n");
return -EOPNOTSUPP;
}
@@ -692,8 +727,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
efx_for_each_tx_queue(tx_queue, efx) {
if ((tx_queue->channel->channel < efx->n_rx_channels) &&
tx_usecs) {
- EFX_ERR(efx, "Channel is shared. "
- "Only RX coalescing may be set\n");
+ netif_err(efx, drv, efx->net_dev, "Channel is shared. "
+ "Only RX coalescing may be set\n");
return -EOPNOTSUPP;
}
}
@@ -721,13 +756,15 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(pause->autoneg ? EFX_FC_AUTO : 0));
if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
- EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "Flow control unsupported: tx ON rx OFF\n");
rc = -EINVAL;
goto out;
}
if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
- EFX_LOG(efx, "Autonegotiation is disabled\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "Autonegotiation is disabled\n");
rc = -EINVAL;
goto out;
}
@@ -758,8 +795,9 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx->phy_op->reconfigure(efx);
if (rc) {
- EFX_ERR(efx, "Unable to advertise requested flow "
- "control setting\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to advertise requested flow "
+ "control setting\n");
goto out;
}
}
@@ -830,10 +868,101 @@ extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, method);
}
+static int
+efx_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, void *rules __always_unused)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = efx->n_rx_channels;
+ return 0;
+
+ case ETHTOOL_GRXFH: {
+ unsigned min_revision = 0;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_FALCON_B0;
+ break;
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_SIENA_A0;
+ break;
+ default:
+ break;
+ }
+ if (efx_nic_rev(efx) < min_revision)
+ info->data = 0;
+ return 0;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return -EOPNOTSUPP;
+
+ indir->size = ARRAY_SIZE(efx->rx_indir_table);
+ memcpy(indir->ring_index, efx->rx_indir_table,
+ copy_size * sizeof(indir->ring_index[0]));
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ size_t i;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return -EOPNOTSUPP;
+
+ /* Validate size and indices */
+ if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ if (indir->ring_index[i] >= efx->n_rx_channels)
+ return -EINVAL;
+
+ memcpy(efx->rx_indir_table, indir->ring_index,
+ sizeof(efx->rx_indir_table));
+ efx_nic_push_rx_indir_table(efx);
+ return 0;
+}
+
const struct ethtool_ops efx_ethtool_ops = {
.get_settings = efx_ethtool_get_settings,
.set_settings = efx_ethtool_set_settings,
.get_drvinfo = efx_ethtool_get_drvinfo,
+ .get_regs_len = efx_ethtool_get_regs_len,
+ .get_regs = efx_ethtool_get_regs,
+ .get_msglevel = efx_ethtool_get_msglevel,
+ .set_msglevel = efx_ethtool_set_msglevel,
.nway_reset = efx_ethtool_nway_reset,
.get_link = efx_ethtool_get_link,
.get_eeprom_len = efx_ethtool_get_eeprom_len,
@@ -854,7 +983,7 @@ const struct ethtool_ops efx_ethtool_ops = {
/* Need to enable/disable TSO-IPv6 too */
.set_tso = efx_ethtool_set_tso,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
+ .set_flags = efx_ethtool_set_flags,
.get_sset_count = efx_ethtool_get_sset_count,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
@@ -863,4 +992,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_wol = efx_ethtool_get_wol,
.set_wol = efx_ethtool_set_wol,
.reset = efx_ethtool_reset,
+ .get_rxnfc = efx_ethtool_get_rxnfc,
+ .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
+ .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
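Every EFX_ERR()/EFX_LOG()/EFX_TRACE() call converted in this patch is now gated on the per-device msg_enable bitmap that the new get_msglevel/set_msglevel ops expose. A minimal sketch of that gating, assuming only what the patch itself declares (efx->msg_enable and efx->net_dev) plus the standard <linux/netdevice.h> helpers; this is illustrative and not code from the patch:

	/* Roughly what netif_err(efx, hw, efx->net_dev, ...) does:
	 * test the NETIF_MSG_HW bit of the per-device mask, then log. */
	static void example_hw_error(struct efx_nic *efx, int rc)
	{
		if (efx->msg_enable & NETIF_MSG_HW)
			netdev_err(efx->net_dev, "hardware error %d\n", rc);
	}

The mask can be adjusted at runtime, e.g. "ethtool -s DEVNAME msglvl N". netif_dbg() additionally needs DEBUG or dynamic debug, and netif_vdbg() needs VERBOSE_DEBUG, so the verbose register-dump messages stay compiled out in normal builds.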
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 8558865..4f9d33f 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -167,13 +167,15 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
* exit without having touched the hardware.
*/
if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
- EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
- raw_smp_processor_id());
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d not for me\n", irq,
+ raw_smp_processor_id());
return IRQ_NONE;
}
efx->last_irq_cpu = raw_smp_processor_id();
- EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
@@ -239,7 +241,8 @@ static int falcon_spi_wait(struct efx_nic *efx)
if (!falcon_spi_poll(efx))
return 0;
if (time_after_eq(jiffies, timeout)) {
- EFX_ERR(efx, "timed out waiting for SPI\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for SPI\n");
return -ETIMEDOUT;
}
schedule_timeout_uninterruptible(1);
@@ -333,9 +336,10 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
if (!(status & SPI_STATUS_NRDY))
return 0;
if (time_after_eq(jiffies, timeout)) {
- EFX_ERR(efx, "SPI write timeout on device %d"
- " last status=0x%02x\n",
- spi->device_id, status);
+ netif_err(efx, hw, efx->net_dev,
+ "SPI write timeout on device %d"
+ " last status=0x%02x\n",
+ spi->device_id, status);
return -ETIMEDOUT;
}
schedule_timeout_uninterruptible(1);
@@ -469,7 +473,8 @@ static void falcon_reset_macs(struct efx_nic *efx)
udelay(10);
}
- EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XMAC core reset\n");
}
}
@@ -492,12 +497,13 @@ static void falcon_reset_macs(struct efx_nic *efx)
if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
!EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
- EFX_LOG(efx, "Completed MAC reset after %d loops\n",
- count);
+ netif_dbg(efx, hw, efx->net_dev,
+ "Completed MAC reset after %d loops\n",
+ count);
break;
}
if (count > 20) {
- EFX_ERR(efx, "MAC reset failed\n");
+ netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
break;
}
count++;
@@ -627,7 +633,8 @@ static void falcon_stats_complete(struct efx_nic *efx)
rmb(); /* read the done flag before the stats */
efx->mac_op->update_stats(efx);
} else {
- EFX_ERR(efx, "timed out waiting for statistics\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for statistics\n");
}
}
@@ -717,16 +724,17 @@ static int falcon_gmii_wait(struct efx_nic *efx)
if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
- EFX_ERR(efx, "error from GMII access "
- EFX_OWORD_FMT"\n",
- EFX_OWORD_VAL(md_stat));
+ netif_err(efx, hw, efx->net_dev,
+ "error from GMII access "
+ EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(md_stat));
return -EIO;
}
return 0;
}
udelay(10);
}
- EFX_ERR(efx, "timed out waiting for GMII\n");
+ netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
return -ETIMEDOUT;
}
@@ -738,7 +746,8 @@ static int falcon_mdio_write(struct net_device *net_dev,
efx_oword_t reg;
int rc;
- EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing MDIO %d register %d.%d with 0x%04x\n",
prtad, devad, addr, value);
mutex_lock(&efx->mdio_lock);
@@ -812,8 +821,9 @@ static int falcon_mdio_read(struct net_device *net_dev,
if (rc == 0) {
efx_reado(efx, &reg, FR_AB_MD_RXD);
rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
- EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
- prtad, devad, addr, rc);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from MDIO %d register %d.%d, got %04x\n",
+ prtad, devad, addr, rc);
} else {
/* Abort the read operation */
EFX_POPULATE_OWORD_2(reg,
@@ -821,8 +831,9 @@ static int falcon_mdio_read(struct net_device *net_dev,
FRF_AB_MD_GC, 1);
efx_writeo(efx, &reg, FR_AB_MD_CS);
- EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
- prtad, devad, addr, rc);
+ netif_dbg(efx, hw, efx->net_dev,
+ "read from MDIO %d register %d.%d, got error %d\n",
+ prtad, devad, addr, rc);
}
out:
@@ -873,7 +884,8 @@ static void falcon_switch_mac(struct efx_nic *efx)
falcon_clock_mac(efx);
- EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
+ netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
+ EFX_IS10G(efx) ? 'X' : 'G');
/* Not all macs support a mac-level link state */
efx->xmac_poll_required = false;
falcon_reset_macs(efx);
@@ -897,8 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx)
efx->phy_op = &falcon_qt202x_phy_ops;
break;
default:
- EFX_ERR(efx, "Unknown PHY type %d\n",
- efx->phy_type);
+ netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
+ efx->phy_type);
return -ENODEV;
}
@@ -926,10 +938,11 @@ static int falcon_probe_port(struct efx_nic *efx)
FALCON_MAC_STATS_SIZE);
if (rc)
return rc;
- EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
- (u64)efx->stats_buffer.dma_addr,
- efx->stats_buffer.addr,
- (u64)virt_to_phys(efx->stats_buffer.addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
return 0;
}
@@ -969,8 +982,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
mutex_unlock(&efx->spi_lock);
if (rc) {
- EFX_ERR(efx, "Failed to read %s\n",
- efx->spi_flash ? "flash" : "EEPROM");
+ netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
+ efx->spi_flash ? "flash" : "EEPROM");
rc = -EIO;
goto out;
}
@@ -980,11 +993,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
rc = -EINVAL;
if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
- EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM bad magic 0x%x\n", magic_num);
goto out;
}
if (struct_ver < 2) {
- EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM has ancient version 0x%x\n", struct_ver);
goto out;
} else if (struct_ver < 4) {
word = &nvconfig->board_magic_num;
@@ -997,7 +1012,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
csum += le16_to_cpu(*word);
if (~csum & 0xffff) {
- EFX_ERR(efx, "NVRAM has incorrect checksum\n");
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM has incorrect checksum\n");
goto out;
}
@@ -1075,22 +1091,25 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
efx_oword_t glb_ctl_reg_ker;
int rc;
- EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
+ netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
+ RESET_TYPE(method));
/* Initiate device reset */
if (method == RESET_TYPE_WORLD) {
rc = pci_save_state(efx->pci_dev);
if (rc) {
- EFX_ERR(efx, "failed to backup PCI state of primary "
- "function prior to hardware reset\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to backup PCI state of primary "
+ "function prior to hardware reset\n");
goto fail1;
}
if (efx_nic_is_dual_func(efx)) {
rc = pci_save_state(nic_data->pci_dev2);
if (rc) {
- EFX_ERR(efx, "failed to backup PCI state of "
- "secondary function prior to "
- "hardware reset\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to backup PCI state of "
+ "secondary function prior to "
+ "hardware reset\n");
goto fail2;
}
}
@@ -1115,7 +1134,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
}
efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
- EFX_LOG(efx, "waiting for hardware reset\n");
+ netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
schedule_timeout_uninterruptible(HZ / 20);
/* Restore PCI configuration if needed */
@@ -1123,28 +1142,32 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
if (efx_nic_is_dual_func(efx)) {
rc = pci_restore_state(nic_data->pci_dev2);
if (rc) {
- EFX_ERR(efx, "failed to restore PCI config for "
- "the secondary function\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to restore PCI config for "
+ "the secondary function\n");
goto fail3;
}
}
rc = pci_restore_state(efx->pci_dev);
if (rc) {
- EFX_ERR(efx, "failed to restore PCI config for the "
- "primary function\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to restore PCI config for the "
+ "primary function\n");
goto fail4;
}
- EFX_LOG(efx, "successfully restored PCI config\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully restored PCI config\n");
}
/* Assert that reset complete */
efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
rc = -ETIMEDOUT;
- EFX_ERR(efx, "timed out waiting for hardware reset\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for hardware reset\n");
goto fail5;
}
- EFX_LOG(efx, "hardware reset complete\n");
+ netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
return 0;
@@ -1167,8 +1190,9 @@ static void falcon_monitor(struct efx_nic *efx)
rc = falcon_board(efx)->type->monitor(efx);
if (rc) {
- EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
- (rc == -ERANGE) ? "reported fault" : "failed");
+ netif_err(efx, hw, efx->net_dev,
+ "Board sensor %s; shutting down PHY\n",
+ (rc == -ERANGE) ? "reported fault" : "failed");
efx->phy_mode |= PHY_MODE_LOW_POWER;
rc = __efx_reconfigure_port(efx);
WARN_ON(rc);
@@ -1219,7 +1243,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
/* Wait for SRAM reset to complete */
count = 0;
do {
- EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
+ netif_dbg(efx, hw, efx->net_dev,
+ "waiting for SRAM reset (attempt %d)...\n", count);
/* SRAM reset is slow; expect around 16ms */
schedule_timeout_uninterruptible(HZ / 50);
@@ -1227,13 +1252,14 @@ static int falcon_reset_sram(struct efx_nic *efx)
/* Check for reset complete */
efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
- EFX_LOG(efx, "SRAM reset complete\n");
+ netif_dbg(efx, hw, efx->net_dev,
+ "SRAM reset complete\n");
return 0;
}
} while (++count < 20); /* wait up to 0.4 sec */
- EFX_ERR(efx, "timed out waiting for SRAM reset\n");
+ netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
return -ETIMEDOUT;
}
@@ -1292,7 +1318,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
rc = falcon_read_nvram(efx, nvconfig);
if (rc == -EINVAL) {
- EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
+ netif_err(efx, probe, efx->net_dev,
+ "NVRAM is invalid therefore using defaults\n");
efx->phy_type = PHY_TYPE_NONE;
efx->mdio.prtad = MDIO_PRTAD_NONE;
board_rev = 0;
@@ -1326,7 +1353,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
/* Read the MAC addresses */
memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
- EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
+ netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
+ efx->phy_type, efx->mdio.prtad);
rc = falcon_probe_board(efx, board_rev);
if (rc)
@@ -1355,14 +1383,16 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
- EFX_LOG(efx, "Booted from %s\n",
- boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
+ netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
+ boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
+ "flash" : "EEPROM");
} else {
/* Disable VPD and set clock dividers to safe
* values for initial programming. */
boot_dev = -1;
- EFX_LOG(efx, "Booted from internal ASIC settings;"
- " setting SPI config\n");
+ netif_dbg(efx, probe, efx->net_dev,
+ "Booted from internal ASIC settings;"
+ " setting SPI config\n");
EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
/* 125 MHz / 7 ~= 20 MHz */
FRF_AB_EE_SF_CLOCK_DIV, 7,
@@ -1396,7 +1426,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = -ENODEV;
if (efx_nic_fpga_ver(efx) != 0) {
- EFX_ERR(efx, "Falcon FPGA not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon FPGA not supported\n");
goto fail1;
}
@@ -1406,16 +1437,19 @@ static int falcon_probe_nic(struct efx_nic *efx)
u8 pci_rev = efx->pci_dev->revision;
if ((pci_rev == 0xff) || (pci_rev == 0)) {
- EFX_ERR(efx, "Falcon rev A0 not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A0 not supported\n");
goto fail1;
}
efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
- EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A1 1G not supported\n");
goto fail1;
}
if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
- EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A1 PCI-X not supported\n");
goto fail1;
}
@@ -1429,7 +1463,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
}
if (!nic_data->pci_dev2) {
- EFX_ERR(efx, "failed to find secondary function\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to find secondary function\n");
rc = -ENODEV;
goto fail2;
}
@@ -1438,7 +1473,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
/* Now we can reset the NIC */
rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
if (rc) {
- EFX_ERR(efx, "failed to reset NIC\n");
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
}
@@ -1448,9 +1483,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
- EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
- (u64)efx->irq_status.dma_addr,
- efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (u64)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (u64)virt_to_phys(efx->irq_status.addr));
falcon_probe_spi_devices(efx);
@@ -1474,7 +1511,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = falcon_board(efx)->type->init(efx);
if (rc) {
- EFX_ERR(efx, "failed to initialise board\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise board\n");
goto fail6;
}
@@ -1544,6 +1582,13 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+
+ /* Enable hash insertion. This is broken for the
+ * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
+ * IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
}
/* Always enable XOFF signal from RX FIFO. We enable
* or disable transmission of pause frames at the MAC. */
@@ -1617,8 +1662,12 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
- /* Set destination of both TX and RX Flush events */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
}
@@ -1823,6 +1872,7 @@ struct efx_nic_type falcon_b0_nic_type = {
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
@@ -1830,7 +1880,7 @@ struct efx_nic_type falcon_b0_nic_type = {
* channels */
.tx_dc_base = 0x130000,
.rx_dc_base = 0x100000,
- .offload_features = NETIF_F_IP_CSUM,
+ .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH,
.reset_world_flags = ETH_RESET_IRQ,
};
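Together with the rx_indir_table[] and rx_hash_key[] fields added to struct efx_nic and the get/set_rxfh_indir ethtool ops above, the Falcon B0 changes here (Toeplitz hash insertion, FR_BZ_RX_RSS_TKEY, rx_buffer_hash_size, NETIF_F_RXHASH) add receive-side scaling. A sketch of what the 128-entry indirection table does, assuming the conventional scheme where low-order bits of the hash index the table; the helper name is illustrative, not from the driver:

	/* Map a received packet's Toeplitz hash to an RX queue.
	 * Each table entry holds a queue index below n_rx_channels,
	 * which is exactly what efx_ethtool_set_rxfh_indir() validates. */
	static unsigned int example_rss_queue(const u32 rx_indir_table[128],
					      u32 toeplitz_hash)
	{
		return rx_indir_table[toeplitz_hash % 128];
	}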
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index c7a933a..3d950c2 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -106,12 +106,17 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
alarms1 &= mask;
alarms2 &= mask >> 8;
if (alarms1 || alarms2) {
- EFX_ERR(efx,
- "LM87 detected a hardware failure (status %02x:%02x)"
- "%s%s\n",
- alarms1, alarms2,
- (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
- (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
+ netif_err(efx, hw, efx->net_dev,
+ "LM87 detected a hardware failure (status %02x:%02x)"
+ "%s%s%s\n",
+ alarms1, alarms2,
+ (alarms1 & LM87_ALARM_TEMP_INT) ?
+ "; board is overheating" : "",
+ (alarms1 & LM87_ALARM_TEMP_EXT1) ?
+ "; controller is overheating" : "",
+ (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1)
+ || alarms2) ?
+ "; electrical fault" : "");
return -ERANGE;
}
@@ -243,7 +248,7 @@ static int sfe4001_poweron(struct efx_nic *efx)
(0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
(0 << P0_EN_1V0X_LBN));
if (rc != out) {
- EFX_INFO(efx, "power-cycling PHY\n");
+ netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
if (rc)
goto fail_on;
@@ -269,7 +274,8 @@ static int sfe4001_poweron(struct efx_nic *efx)
if (rc)
goto fail_on;
- EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
+ netif_info(efx, hw, efx->net_dev,
+ "waiting for DSP boot (attempt %d)...\n", i);
/* In flash config mode, DSP does not turn on AFE, so
* just wait 1 second.
@@ -291,7 +297,7 @@ static int sfe4001_poweron(struct efx_nic *efx)
}
}
- EFX_INFO(efx, "timed out waiting for DSP boot\n");
+ netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
rc = -ETIMEDOUT;
fail_on:
sfe4001_poweroff(efx);
@@ -377,7 +383,7 @@ static void sfe4001_fini(struct efx_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
- EFX_INFO(efx, "%s\n", __func__);
+ netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
sfe4001_poweroff(efx);
@@ -461,7 +467,7 @@ static int sfe4001_init(struct efx_nic *efx)
if (rc)
goto fail_on;
- EFX_INFO(efx, "PHY is powered on\n");
+ netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
return 0;
fail_on:
@@ -493,7 +499,7 @@ static int sfn4111t_check_hw(struct efx_nic *efx)
static void sfn4111t_fini(struct efx_nic *efx)
{
- EFX_INFO(efx, "%s\n", __func__);
+ netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
i2c_unregister_device(falcon_board(efx)->hwmon_client);
@@ -742,13 +748,14 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
board->type = &board_types[i];
if (board->type) {
- EFX_INFO(efx, "board is %s rev %c%d\n",
+ netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
return 0;
} else {
- EFX_ERR(efx, "unknown board type %d\n", type_id);
+ netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
+ type_id);
return -ENODEV;
}
}
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index c84a2ce..bae656d 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -81,7 +81,8 @@ int falcon_reset_xaui(struct efx_nic *efx)
}
udelay(10);
}
- EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XAUI/XGXS reset\n");
return -ETIMEDOUT;
}
@@ -256,7 +257,7 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
falcon_stop_nic_stats(efx);
while (!mac_up && tries) {
- EFX_LOG(efx, "bashing xaui\n");
+ netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
falcon_reset_xaui(efx);
udelay(200);
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index b89177c..85a99fe 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -78,8 +78,9 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
{
unsigned long flags __attribute__ ((unused));
- EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
- EFX_OWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
@@ -105,8 +106,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
- EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
- addr, EFX_QWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+ addr, EFX_QWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
@@ -129,8 +131,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
- EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
- reg, EFX_DWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing partial register %x with "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
@@ -155,8 +158,9 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
value->u32[3] = _efx_readd(efx, reg + 12);
spin_unlock_irqrestore(&efx->biu_lock, flags);
- EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
- EFX_OWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
}
/* Read an 8-byte SRAM entry through supplied mapping,
@@ -177,8 +181,9 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
- EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
- addr, EFX_QWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+ addr, EFX_QWORD_VAL(*value));
}
/* Read dword from register that allows partial writes (sic) */
@@ -186,8 +191,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
value->u32[0] = _efx_readd(efx, reg);
- EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
- reg, EFX_DWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
}
/* Write to a register forming part of a table */
@@ -211,6 +217,13 @@ static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
}
+/* Read from a dword register forming part of a table */
+static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
+}
+
/* Page-mapped register block size */
#define EFX_PAGE_BLOCK_SIZE 0x2000
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 93cc3c1..3912b8f 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -168,11 +168,12 @@ static int efx_mcdi_poll(struct efx_nic *efx)
error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
if (error && mcdi->resplen == 0) {
- EFX_ERR(efx, "MC rebooted\n");
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
rc = EIO;
} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
- EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
- respseq, mcdi->seqno);
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
rc = EIO;
} else if (error) {
efx_readd(efx, &reg, pdu + 4);
@@ -303,8 +304,9 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
/* The request has been cancelled */
--mcdi->credits;
else
- EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx "
- "seq 0x%x\n", seqno, mcdi->seqno);
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx "
+ "seq 0x%x\n", seqno, mcdi->seqno);
} else {
mcdi->resprc = errno;
mcdi->resplen = datalen;
@@ -352,8 +354,9 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
++mcdi->credits;
spin_unlock_bh(&mcdi->iface_lock);
- EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n",
- cmd, (int)inlen, mcdi->mode);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
} else {
size_t resplen;
@@ -374,11 +377,13 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
; /* Don't reset if MC_CMD_REBOOT returns EIO */
else if (rc == -EIO || rc == -EINTR) {
- EFX_ERR(efx, "MC fatal error %d\n", -rc);
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} else
- EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n",
- cmd, (int)inlen, -rc);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d\n",
+ cmd, (int)inlen, -rc);
}
efx_mcdi_release(mcdi);
@@ -534,8 +539,9 @@ static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
- EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n",
- monitor, name, state_txt, value);
+ netif_err(efx, hw, efx->net_dev,
+ "Sensor %d (%s) reports condition '%s' for raw value %d\n",
+ monitor, name, state_txt, value);
}
/* Called from falcon_process_eventq for MCDI events */
@@ -548,12 +554,13 @@ void efx_mcdi_process_event(struct efx_channel *channel,
switch (code) {
case MCDI_EVENT_CODE_BADSSERT:
- EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data);
+ netif_err(efx, hw, efx->net_dev,
+ "MC watchdog or assertion failure at 0x%x\n", data);
efx_mcdi_ev_death(efx, EINTR);
break;
case MCDI_EVENT_CODE_PMNOTICE:
- EFX_INFO(efx, "MCDI PM event.\n");
+ netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
break;
case MCDI_EVENT_CODE_CMDDONE:
@@ -570,10 +577,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
- EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data);
+ netif_info(efx, hw, efx->net_dev,
+ "MC Scheduler error address=0x%x\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
- EFX_INFO(efx, "MC Reboot\n");
+ netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
efx_mcdi_ev_death(efx, EIO);
break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
@@ -581,7 +589,8 @@ void efx_mcdi_process_event(struct efx_channel *channel,
break;
default:
- EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code);
+ netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
+ code);
}
}
@@ -627,7 +636,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -657,7 +666,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -695,7 +704,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
+ __func__, rc, (int)outlen);
return rc;
}
@@ -724,7 +734,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -749,8 +759,8 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n",
- __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
return rc;
}
@@ -781,7 +791,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -802,7 +812,7 @@ int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -827,7 +837,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -853,7 +863,7 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -877,7 +887,7 @@ int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -898,7 +908,7 @@ int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -948,9 +958,10 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
return 0;
fail2:
- EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
+ netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
+ __func__, type);
fail1:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -994,14 +1005,15 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
? "watchdog reset"
: "unknown assertion";
- EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
- MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
- MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
+ netif_err(efx, hw, efx->net_dev,
+ "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
for (index = 1; index < 32; index++) {
- EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
MCDI_DWORD2(outbuf, ofst));
ofst += sizeof(efx_dword_t);
}
@@ -1050,14 +1062,16 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
}
int efx_mcdi_reset_port(struct efx_nic *efx)
{
int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
if (rc)
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
return rc;
}
@@ -1075,7 +1089,7 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
return 0;
if (rc == 0)
rc = -EIO;
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1108,7 +1122,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
fail:
*id_out = -1;
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1143,7 +1157,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
fail:
*id_out = -1;
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1163,7 +1177,7 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1179,7 +1193,7 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 3918263..f88f4bf 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -69,8 +69,8 @@ static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n",
- __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
return rc;
}
@@ -110,8 +110,8 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
return 0;
fail:
- EFX_ERR(efx, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
+ netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
+ __func__, enable ? "enable" : "disable", rc);
return rc;
}
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 86e43b1..0121e71 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -71,7 +71,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -97,7 +97,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -122,7 +122,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -150,7 +150,7 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -178,7 +178,7 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -466,8 +466,8 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
rmtadv |= ADVERTISED_Asym_Pause;
if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
- EFX_ERR(efx, "warning: link partner doesn't support "
- "pause frames");
+ netif_err(efx, link, efx->net_dev,
+ "warning: link partner doesn't support pause frames");
}
static bool efx_mcdi_phy_poll(struct efx_nic *efx)
@@ -483,7 +483,8 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc) {
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
efx->link_state.up = false;
} else {
efx_mcdi_phy_decode_link(
@@ -526,7 +527,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc) {
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
return;
}
ecmd->lp_advertising =
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 0548fcb..eeaf0bd 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -63,7 +63,8 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
/* Read MMD STATUS2 to check it is responding. */
status = efx_mdio_read(efx, mmd, MDIO_STAT2);
if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
- EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
+ netif_err(efx, hw, efx->net_dev,
+ "PHY MMD %d not responding.\n", mmd);
return -EIO;
}
}
@@ -72,12 +73,14 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
status = efx_mdio_read(efx, mmd, MDIO_STAT1);
if (status & MDIO_STAT1_FAULT) {
if (fault_fatal) {
- EFX_ERR(efx, "PHY MMD %d reporting fatal"
- " fault: status %x\n", mmd, status);
+ netif_err(efx, hw, efx->net_dev,
+ "PHY MMD %d reporting fatal"
+ " fault: status %x\n", mmd, status);
return -EIO;
} else {
- EFX_LOG(efx, "PHY MMD %d reporting status"
- " %x (expected)\n", mmd, status);
+ netif_dbg(efx, hw, efx->net_dev,
+ "PHY MMD %d reporting status"
+ " %x (expected)\n", mmd, status);
}
}
return 0;
@@ -103,8 +106,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
if (mask & 1) {
stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
if (stat < 0) {
- EFX_ERR(efx, "failed to read status of"
- " MMD %d\n", mmd);
+ netif_err(efx, hw, efx->net_dev,
+ "failed to read status of"
+ " MMD %d\n", mmd);
return -EIO;
}
if (stat & MDIO_CTRL1_RESET)
@@ -119,8 +123,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
msleep(spintime);
}
if (in_reset != 0) {
- EFX_ERR(efx, "not all MMDs came out of reset in time."
- " MMDs still in reset: %x\n", in_reset);
+ netif_err(efx, hw, efx->net_dev,
+ "not all MMDs came out of reset in time."
+ " MMDs still in reset: %x\n", in_reset);
rc = -ETIMEDOUT;
}
return rc;
@@ -142,16 +147,18 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
if (devs1 < 0 || devs2 < 0) {
- EFX_ERR(efx, "failed to read devices present\n");
+ netif_err(efx, hw, efx->net_dev,
+ "failed to read devices present\n");
return -EIO;
}
devices = devs1 | (devs2 << 16);
if ((devices & mmd_mask) != mmd_mask) {
- EFX_ERR(efx, "required MMDs not present: got %x, "
- "wanted %x\n", devices, mmd_mask);
+ netif_err(efx, hw, efx->net_dev,
+ "required MMDs not present: got %x, wanted %x\n",
+ devices, mmd_mask);
return -ENODEV;
}
- EFX_TRACE(efx, "Devices present: %x\n", devices);
+ netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);
/* Check all required MMDs are responding and happy. */
while (mmd_mask) {
@@ -219,7 +226,7 @@ static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
{
int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
- EFX_TRACE(efx, "Setting low power mode for MMD %d to %d\n",
+ netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
mmd, lpower);
if (stat & MDIO_STAT1_LPOWERABLE) {
@@ -349,8 +356,8 @@ int efx_mdio_test_alive(struct efx_nic *efx)
if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
(physid2 == 0x0000) || (physid2 == 0xffff)) {
- EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
- efx->mdio.prtad);
+ netif_err(efx, hw, efx->net_dev,
+ "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
rc = -EINVAL;
} else {
rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f89e719..75791d3 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -51,7 +51,8 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
if (!sync)
- EFX_LOG(efx, "XGXS lane status: %x\n", lane_status);
+ netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
+ lane_status);
return sync;
}
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index f3ac7f3..02e54b4 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
-#define EFX_DRIVER_NAME "sfc_mtd"
#include "net_driver.h"
#include "spi.h"
#include "efx.h"
@@ -71,8 +70,10 @@ static int siena_mtd_probe(struct efx_nic *efx);
/* SPI utilities */
-static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
+static int
+efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
+ struct efx_mtd *efx_mtd = part->mtd.priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
u8 status;
@@ -92,7 +93,7 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
if (signal_pending(current))
return -EINTR;
}
- EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
+ pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
return -ETIMEDOUT;
}
@@ -131,8 +132,10 @@ efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
return 0;
}
-static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
+static int
+efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
+ struct efx_mtd *efx_mtd = part->mtd.priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
unsigned pos, block_len;
@@ -156,7 +159,7 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
NULL, 0);
if (rc)
return rc;
- rc = efx_spi_slow_wait(efx_mtd, false);
+ rc = efx_spi_slow_wait(part, false);
/* Verify the entire region has been wiped */
memset(empty, 0xff, sizeof(empty));
@@ -198,13 +201,14 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
static void efx_mtd_sync(struct mtd_info *mtd)
{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
int rc;
rc = efx_mtd->ops->sync(mtd);
if (rc)
- EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
+ pr_err("%s: %s sync failed (%d)\n",
+ part->name, efx_mtd->name, rc);
}
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -338,7 +342,7 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
rc = mutex_lock_interruptible(&efx->spi_lock);
if (rc)
return rc;
- rc = efx_spi_erase(efx_mtd, part->offset + start, len);
+ rc = efx_spi_erase(part, part->offset + start, len);
mutex_unlock(&efx->spi_lock);
return rc;
}
@@ -363,12 +367,13 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
static int falcon_mtd_sync(struct mtd_info *mtd)
{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
int rc;
mutex_lock(&efx->spi_lock);
- rc = efx_spi_slow_wait(efx_mtd, true);
+ rc = efx_spi_slow_wait(part, true);
mutex_unlock(&efx->spi_lock);
return rc;
}
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index ba636e0..bab836c 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -13,6 +13,10 @@
#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H
+#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
+#define DEBUG
+#endif
+
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -35,9 +39,7 @@
* Build definitions
*
**************************************************************************/
-#ifndef EFX_DRIVER_NAME
-#define EFX_DRIVER_NAME "sfc"
-#endif
+
#define EFX_DRIVER_VERSION "3.0"
#ifdef EFX_ENABLE_DEBUG
@@ -48,35 +50,6 @@
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif
-/* Un-rate-limited logging */
-#define EFX_ERR(efx, fmt, args...) \
-dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
-
-#define EFX_INFO(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
-
-#ifdef EFX_ENABLE_DEBUG
-#define EFX_LOG(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
-#else
-#define EFX_LOG(efx, fmt, args...) \
-dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
-#endif
-
-#define EFX_TRACE(efx, fmt, args...) do {} while (0)
-
-#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
-
-/* Rate-limited logging */
-#define EFX_ERR_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
-
-#define EFX_INFO_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
-
-#define EFX_LOG_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
-
/**************************************************************************
*
* Efx data structures
@@ -663,6 +636,7 @@ union efx_multicast_hash {
* @interrupt_mode: Interrupt mode
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irq_rx_moderation: IRQ moderation time for RX event queues
+ * @msg_enable: Log message enable flags
* @state: Device state flag. Serialised by the rtnl_lock.
* @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
* @tx_queue: TX DMA queues
@@ -674,6 +648,7 @@ union efx_multicast_hash {
* @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_indir_table: Indirection table for RSS
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer
@@ -746,6 +721,7 @@ struct efx_nic {
enum efx_int_mode interrupt_mode;
bool irq_rx_adaptive;
unsigned int irq_rx_moderation;
+ u32 msg_enable;
enum nic_state state;
enum reset_type reset_pending;
@@ -760,6 +736,8 @@ struct efx_nic {
unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
+ u8 rx_hash_key[40];
+ u32 rx_indir_table[128];
unsigned int_error_count;
unsigned long int_error_expire;
@@ -872,7 +850,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_padding: Padding added to each RX buffer
+ * @rx_buffer_hash_size: Size of hash at start of RX buffer
+ * @rx_buffer_padding: Size of padding at end of RX buffer
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
* @phys_addr_channels: Number of channels with physically addressed
@@ -916,6 +895,7 @@ struct efx_nic_type {
unsigned int evq_ptr_tbl_base;
unsigned int evq_rptr_tbl_base;
u64 max_dma_mask;
+ unsigned int rx_buffer_hash_size;
unsigned int rx_buffer_padding;
unsigned int max_interrupt_mode;
unsigned int phys_addr_channels;
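The msg_enable field added above replaces the compile-time EFX_* log macros with per-device, runtime-selectable message levels. How it gets its initial value is not shown in this excerpt; a hedged sketch of the usual pattern, where the 'debug' module parameter and the default bit set are assumptions rather than taken from this patch:

	/* Seed msg_enable from a module parameter, falling back to a
	 * default mask; netif_msg_init() is the standard helper from
	 * <linux/netdevice.h>. */
	static int debug = -1;			/* -1 selects the default mask */
	module_param(debug, int, 0);
	MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

	static void example_init_msglevel(struct efx_nic *efx)
	{
		efx->msg_enable = netif_msg_init(debug,
						 NETIF_MSG_DRV | NETIF_MSG_PROBE |
						 NETIF_MSG_LINK | NETIF_MSG_HW);
	}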
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 0ee6fd3..f595d92 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -179,9 +179,10 @@ int efx_nic_test_registers(struct efx_nic *efx,
return 0;
fail:
- EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
- " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
- EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
return -EIO;
}
@@ -214,8 +215,9 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
for (i = 0; i < buffer->entries; i++) {
index = buffer->index + i;
dma_addr = buffer->dma_addr + (i * 4096);
- EFX_LOG(efx, "mapping special buffer %d at %llx\n",
- index, (unsigned long long)dma_addr);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
EFX_POPULATE_QWORD_3(buf_desc,
FRF_AZ_BUF_ADR_REGION, 0,
FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
@@ -235,8 +237,8 @@ efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
if (!buffer->entries)
return;
- EFX_LOG(efx, "unmapping special buffers %d-%d\n",
- buffer->index, buffer->index + buffer->entries - 1);
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
EFX_POPULATE_OWORD_4(buf_tbl_upd,
FRF_AZ_BUF_UPD_CMD, 0,
@@ -276,11 +278,12 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
- EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->dma_addr, len,
+ buffer->addr, (u64)virt_to_phys(buffer->addr));
return 0;
}
@@ -291,11 +294,12 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
if (!buffer->addr)
return;
- EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, buffer->len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->dma_addr, buffer->len,
+ buffer->addr, (u64)virt_to_phys(buffer->addr));
pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
buffer->dma_addr);
@@ -555,9 +559,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
- EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
- rx_queue->queue, rx_queue->rxd.index,
- rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ rx_queue->queue, rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
rx_queue->flushed = FLUSH_NONE;
@@ -694,9 +699,10 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_WORKAROUND_10727(efx)) {
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
} else {
- EFX_ERR(efx, "channel %d unexpected TX event "
- EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(*event));
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
}
return tx_packets;
@@ -759,20 +765,21 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
* to a FIFO overflow.
*/
#ifdef EFX_ENABLE_DEBUG
- if (rx_ev_other_err) {
- EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
- EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
- rx_queue->queue, EFX_QWORD_VAL(*event),
- rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
- rx_ev_ip_hdr_chksum_err ?
- " [IP_HDR_CHKSUM_ERR]" : "",
- rx_ev_tcp_udp_chksum_err ?
- " [TCP_UDP_CHKSUM_ERR]" : "",
- rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
- rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
- rx_ev_drib_nib ? " [DRIB_NIB]" : "",
- rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
- rx_ev_pause_frm ? " [PAUSE]" : "");
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ rx_queue->queue, EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
}
@@ -786,8 +793,9 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
expected = rx_queue->removed_count & EFX_RXQ_MASK;
dropped = (index - expected) & EFX_RXQ_MASK;
- EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
- dropped, index, expected);
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
@@ -873,9 +881,9 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
* queue. Refill it here */
efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
else
- EFX_LOG(efx, "channel %d received generated "
- "event "EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(*event));
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
}
/* Global events are basically PHY events */
@@ -901,8 +909,9 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
- EFX_ERR(efx, "channel %d seen global RX_RESET "
- "event. Resetting.\n", channel->channel);
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen global RX_RESET event. Resetting.\n",
+ channel->channel);
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
@@ -911,9 +920,10 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
}
if (!handled)
- EFX_ERR(efx, "channel %d unknown global event "
- EFX_QWORD_FMT "\n", channel->channel,
- EFX_QWORD_VAL(*event));
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown global event "
+ EFX_QWORD_FMT "\n", channel->channel,
+ EFX_QWORD_VAL(*event));
}
static void
@@ -928,31 +938,35 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
switch (ev_sub_code) {
case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
- EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
- channel->channel, ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
- EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
- channel->channel, ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
- EFX_LOG(efx, "channel %d EVQ %d initialised\n",
- channel->channel, ev_sub_data);
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AZ_SRM_UPD_DONE_EV:
- EFX_TRACE(efx, "channel %d SRAM update done\n",
- channel->channel);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
break;
case FSE_AZ_WAKE_UP_EV:
- EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
- channel->channel, ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AZ_TIMER_EV:
- EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
- channel->channel, ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AA_RX_RECOVER_EV:
- EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx,
@@ -961,19 +975,22 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
- EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
- " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
break;
case FSE_BZ_TX_DSC_ERROR_EV:
- EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
- " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
break;
default:
- EFX_TRACE(efx, "channel %d unknown driver event code %d "
- "data %04x\n", channel->channel, ev_sub_code,
- ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
break;
}
}
@@ -996,8 +1013,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
/* End of events */
break;
- EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(event));
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
@@ -1033,9 +1051,10 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
efx_mcdi_process_event(channel, &event);
break;
default:
- EFX_ERR(channel->efx, "channel %d unknown event type %d"
- " (data " EFX_QWORD_FMT ")\n", channel->channel,
- ev_code, EFX_QWORD_VAL(event));
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
}
}
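
Throughout these hunks the driver-private EFX_ERR/EFX_LOG/EFX_TRACE macros give way to netif_err/netif_dbg/netif_vdbg, which take an extra message-class argument (hw, intr, rx_err, tx_err, drv, probe, ...) so each class can be masked by the per-device msg_enable word. A minimal userspace sketch of that gating idea follows; the bit names and the mask value are made up for illustration and are not part of the patch.

/* Illustrative sketch (not part of the patch): each message class is one
 * bit in a per-device msg_enable mask, so classes can be switched on and
 * off at run time (in the kernel this mask is set through ethtool). */
#include <stdio.h>

#define MSG_PROBE  (1u << 0)
#define MSG_RX_ERR (1u << 1)
#define MSG_TX_ERR (1u << 2)
#define MSG_INTR   (1u << 3)

#define dev_msg(mask, class, ...) \
	do { if ((mask) & (class)) printf(__VA_ARGS__); } while (0)

int main(void)
{
	unsigned int msg_enable = MSG_PROBE | MSG_RX_ERR;   /* assumed mask */

	dev_msg(msg_enable, MSG_RX_ERR, "RX queue %d flush timed out\n", 3);
	dev_msg(msg_enable, MSG_INTR, "IRQ %d on CPU %d\n", 42, 1); /* suppressed */
	return 0;
}
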
@@ -1060,9 +1079,10 @@ void efx_nic_init_eventq(struct efx_channel *channel)
efx_oword_t reg;
struct efx_nic *efx = channel->efx;
- EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
- channel->channel, channel->eventq.index,
- channel->eventq.index + channel->eventq.entries - 1);
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
EFX_POPULATE_OWORD_3(reg,
@@ -1240,14 +1260,16 @@ int efx_nic_flush_queues(struct efx_nic *efx)
* leading to a reset, or fake up success anyway */
efx_for_each_tx_queue(tx_queue, efx) {
if (tx_queue->flushed != FLUSH_DONE)
- EFX_ERR(efx, "tx queue %d flush command timed out\n",
- tx_queue->queue);
+ netif_err(efx, hw, efx->net_dev,
+ "tx queue %d flush command timed out\n",
+ tx_queue->queue);
tx_queue->flushed = FLUSH_DONE;
}
efx_for_each_rx_queue(rx_queue, efx) {
if (rx_queue->flushed != FLUSH_DONE)
- EFX_ERR(efx, "rx queue %d flush command timed out\n",
- rx_queue->queue);
+ netif_err(efx, hw, efx->net_dev,
+ "rx queue %d flush command timed out\n",
+ rx_queue->queue);
rx_queue->flushed = FLUSH_DONE;
}
@@ -1319,10 +1341,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
- EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
- EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
- EFX_OWORD_VAL(fatal_intr),
- error ? "disabling bus mastering" : "no recognised error");
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
/* If this is a memory parity error dump which blocks are offending */
mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
@@ -1330,8 +1352,9 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
- EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
- EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
}
/* Disable both devices */
@@ -1348,11 +1371,13 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
jiffies + EFX_INT_ERROR_EXPIRE * HZ;
}
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
- EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
- EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
- "NIC will be disabled\n");
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - max number of errors seen."
+ "NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
@@ -1415,8 +1440,9 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
if (result == IRQ_HANDLED) {
efx->last_irq_cpu = raw_smp_processor_id();
- EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
}
return result;
@@ -1437,8 +1463,9 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
int syserr;
efx->last_irq_cpu = raw_smp_processor_id();
- EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Check to see if we have a serious error condition */
if (channel->channel == efx->fatal_irq_level) {
@@ -1457,22 +1484,21 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
-static void efx_setup_rss_indir_table(struct efx_nic *efx)
+void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
- int i = 0;
- unsigned long offset;
+ size_t i = 0;
efx_dword_t dword;
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
return;
- for (offset = FR_BZ_RX_INDIRECTION_TBL;
- offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
- offset += 0x10) {
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- i % efx->n_rx_channels);
- efx_writed(efx, &dword, offset);
- i++;
+ efx->rx_indir_table[i]);
+ efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
}
}
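
The rewritten efx_nic_push_rx_indir_table() writes one row per entry of efx->rx_indir_table rather than computing i % efx->n_rx_channels on the fly; the old loop's 0x800-byte span with a 0x10-byte step implies 128 rows. A minimal sketch of the round-robin fill and of mapping a hash to an RX queue, with an assumed channel count, follows (userspace illustration only, not part of the patch).

/* Illustrative sketch: fill a 128-entry RSS indirection table round-robin
 * over n_rx_channels, as the old "i % efx->n_rx_channels" did, then map a
 * hash value to an RX queue. */
#include <stdio.h>

#define INDIR_ROWS 128   /* 0x800 bytes / 0x10 step in the old loop */

int main(void)
{
	unsigned char indir[INDIR_ROWS];
	unsigned int n_rx_channels = 4;   /* assumed example value */
	unsigned int i, hash = 0xdeadbeef;

	for (i = 0; i < INDIR_ROWS; i++)
		indir[i] = i % n_rx_channels;

	/* The NIC indexes the table with low-order bits of the RX hash */
	printf("hash 0x%08x -> RX queue %u\n", hash,
	       (unsigned int)indir[hash % INDIR_ROWS]);
	return 0;
}
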
@@ -1494,8 +1520,9 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
efx->name, efx);
if (rc) {
- EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
- efx->pci_dev->irq);
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook legacy IRQ %d\n",
+ efx->pci_dev->irq);
goto fail1;
}
return 0;
@@ -1507,7 +1534,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
IRQF_PROBE_SHARED, /* Not shared */
channel->name, channel);
if (rc) {
- EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook IRQ %d\n", channel->irq);
goto fail2;
}
}
@@ -1605,7 +1633,7 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
- efx_setup_rss_indir_table(efx);
+ efx_nic_push_rx_indir_table(efx);
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
@@ -1627,3 +1655,269 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
}
+
+/* Register dump */
+
+#define REGISTER_REVISION_A 1
+#define REGISTER_REVISION_B 2
+#define REGISTER_REVISION_C 3
+#define REGISTER_REVISION_Z 3 /* latest revision */
+
+struct efx_nic_reg {
+ u32 offset:24;
+ u32 min_revision:2, max_revision:2;
+};
+
+#define REGISTER(name, min_rev, max_rev) { \
+ FR_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
+}
+#define REGISTER_AA(name) REGISTER(name, A, A)
+#define REGISTER_AB(name) REGISTER(name, A, B)
+#define REGISTER_AZ(name) REGISTER(name, A, Z)
+#define REGISTER_BB(name) REGISTER(name, B, B)
+#define REGISTER_BZ(name) REGISTER(name, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, C, Z)
+
+static const struct efx_nic_reg efx_nic_regs[] = {
+ REGISTER_AZ(ADR_REGION),
+ REGISTER_AZ(INT_EN_KER),
+ REGISTER_BZ(INT_EN_CHAR),
+ REGISTER_AZ(INT_ADR_KER),
+ REGISTER_BZ(INT_ADR_CHAR),
+ /* INT_ACK_KER is WO */
+ /* INT_ISR0 is RC */
+ REGISTER_AZ(HW_INIT),
+ REGISTER_CZ(USR_EV_CFG),
+ REGISTER_AB(EE_SPI_HCMD),
+ REGISTER_AB(EE_SPI_HADR),
+ REGISTER_AB(EE_SPI_HDATA),
+ REGISTER_AB(EE_BASE_PAGE),
+ REGISTER_AB(EE_VPD_CFG0),
+ /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+ /* PCIE_CORE_INDIRECT is indirect */
+ REGISTER_AB(NIC_STAT),
+ REGISTER_AB(GPIO_CTL),
+ REGISTER_AB(GLB_CTL),
+ /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+ REGISTER_BZ(DP_CTRL),
+ REGISTER_AZ(MEM_STAT),
+ REGISTER_AZ(CS_DEBUG),
+ REGISTER_AZ(ALTERA_BUILD),
+ REGISTER_AZ(CSR_SPARE),
+ REGISTER_AB(PCIE_SD_CTL0123),
+ REGISTER_AB(PCIE_SD_CTL45),
+ REGISTER_AB(PCIE_PCS_CTL_STAT),
+ /* DEBUG_DATA_OUT is not used */
+ /* DRV_EV is WO */
+ REGISTER_AZ(EVQ_CTL),
+ REGISTER_AZ(EVQ_CNT1),
+ REGISTER_AZ(EVQ_CNT2),
+ REGISTER_AZ(BUF_TBL_CFG),
+ REGISTER_AZ(SRM_RX_DC_CFG),
+ REGISTER_AZ(SRM_TX_DC_CFG),
+ REGISTER_AZ(SRM_CFG),
+ /* BUF_TBL_UPD is WO */
+ REGISTER_AZ(SRM_UPD_EVQ),
+ REGISTER_AZ(SRAM_PARITY),
+ REGISTER_AZ(RX_CFG),
+ REGISTER_BZ(RX_FILTER_CTL),
+ /* RX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(RX_DC_CFG),
+ REGISTER_AZ(RX_DC_PF_WM),
+ REGISTER_BZ(RX_RSS_TKEY),
+ /* RX_NODESC_DROP is RC */
+ REGISTER_AA(RX_SELF_RST),
+ /* RX_DEBUG, RX_PUSH_DROP are not used */
+ REGISTER_CZ(RX_RSS_IPV6_REG1),
+ REGISTER_CZ(RX_RSS_IPV6_REG2),
+ REGISTER_CZ(RX_RSS_IPV6_REG3),
+ /* TX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(TX_DC_CFG),
+ REGISTER_AA(TX_CHKSM_CFG),
+ REGISTER_AZ(TX_CFG),
+ /* TX_PUSH_DROP is not used */
+ REGISTER_AZ(TX_RESERVED),
+ REGISTER_BZ(TX_PACE),
+ /* TX_PACE_DROP_QID is RC */
+ REGISTER_BB(TX_VLAN),
+ REGISTER_BZ(TX_IPFIL_PORTEN),
+ REGISTER_AB(MD_TXD),
+ REGISTER_AB(MD_RXD),
+ REGISTER_AB(MD_CS),
+ REGISTER_AB(MD_PHY_ADR),
+ REGISTER_AB(MD_ID),
+ /* MD_STAT is RC */
+ REGISTER_AB(MAC_STAT_DMA),
+ REGISTER_AB(MAC_CTRL),
+ REGISTER_BB(GEN_MODE),
+ REGISTER_AB(MAC_MC_HASH_REG0),
+ REGISTER_AB(MAC_MC_HASH_REG1),
+ REGISTER_AB(GM_CFG1),
+ REGISTER_AB(GM_CFG2),
+ /* GM_IPG and GM_HD are not used */
+ REGISTER_AB(GM_MAX_FLEN),
+ /* GM_TEST is not used */
+ REGISTER_AB(GM_ADR1),
+ REGISTER_AB(GM_ADR2),
+ REGISTER_AB(GMF_CFG0),
+ REGISTER_AB(GMF_CFG1),
+ REGISTER_AB(GMF_CFG2),
+ REGISTER_AB(GMF_CFG3),
+ REGISTER_AB(GMF_CFG4),
+ REGISTER_AB(GMF_CFG5),
+ REGISTER_BB(TX_SRC_MAC_CTL),
+ REGISTER_AB(XM_ADR_LO),
+ REGISTER_AB(XM_ADR_HI),
+ REGISTER_AB(XM_GLB_CFG),
+ REGISTER_AB(XM_TX_CFG),
+ REGISTER_AB(XM_RX_CFG),
+ REGISTER_AB(XM_MGT_INT_MASK),
+ REGISTER_AB(XM_FC),
+ REGISTER_AB(XM_PAUSE_TIME),
+ REGISTER_AB(XM_TX_PARAM),
+ REGISTER_AB(XM_RX_PARAM),
+ /* XM_MGT_INT_MSK (note no 'A') is RC */
+ REGISTER_AB(XX_PWR_RST),
+ REGISTER_AB(XX_SD_CTL),
+ REGISTER_AB(XX_TXDRV_CTL),
+ /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+ /* XX_CORE_STAT is partly RC */
+};
+
+struct efx_nic_reg_table {
+ u32 offset:24;
+ u32 min_revision:2, max_revision:2;
+ u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+ offset, \
+ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
+ step, rows \
+}
+#define REGISTER_TABLE(name, min_rev, max_rev) \
+ REGISTER_TABLE_DIMENSIONS( \
+ name, FR_ ## min_rev ## max_rev ## _ ## name, \
+ min_rev, max_rev, \
+ FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+#define REGISTER_TABLE_BB_CZ(name) \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_BB_ ## name ## _ROWS), \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+
+static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
+ /* DRIVER is not used */
+ /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+ REGISTER_TABLE_BB(TX_IPFIL_TBL),
+ REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+ REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+ /* The register buffer is allocated with slab, so we can't
+ * reasonably read all of the buffer table (up to 8MB!).
+ * However this driver will only use a few entries. Reading
+ * 1K entries allows for some expansion of queue count and
+ * size before we need to change the version. */
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+ A, A, 8, 1024),
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+ B, Z, 8, 1024),
+ /* RX_FILTER_TBL{0,1} is huge and not used by this driver */
+ REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_BB_CZ(TIMER_TBL),
+ REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+ REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+ /* TX_FILTER_TBL0 is huge and not used by this driver */
+ REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_CZ(MC_TREG_SMEM),
+ /* MSIX_PBA_TABLE is not mapped */
+	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
+};
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+ size_t len = 0;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++)
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision)
+ len += sizeof(efx_oword_t);
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++)
+ if (efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision)
+ len += table->rows * min_t(size_t, table->step, 16);
+
+ return len;
+}
+
+void efx_nic_get_regs(struct efx_nic *efx, void *buf)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++) {
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision) {
+ efx_reado(efx, (efx_oword_t *)buf, reg->offset);
+ buf += sizeof(efx_oword_t);
+ }
+ }
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++) {
+ size_t size, i;
+
+ if (!(efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision))
+ continue;
+
+ size = min_t(size_t, table->step, 16);
+
+ for (i = 0; i < table->rows; i++) {
+ switch (table->step) {
+ case 4: /* 32-bit register or SRAM */
+ efx_readd_table(efx, buf, table->offset, i);
+ break;
+ case 8: /* 64-bit SRAM */
+ efx_sram_readq(efx,
+ efx->membase + table->offset,
+ buf, i);
+ break;
+ case 16: /* 128-bit register */
+ efx_reado_table(efx, buf, table->offset, i);
+ break;
+ case 32: /* 128-bit register, interleaved */
+ efx_reado_table(efx, buf, table->offset, 2 * i);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ buf += size;
+ }
+ }
+}
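
The new register dump keeps only registers and tables whose [min_revision, max_revision] window covers the running chip revision, and each table contributes rows * min(step, 16) bytes, since entries wider than 16 bytes are still read as 16-byte owords. A minimal sketch of that filter and length arithmetic, with made-up table dimensions and an assumed revision, follows (userspace illustration only).

/* Illustrative sketch: the same revision-window filter and length
 * arithmetic used by efx_nic_get_regs_len(), with hypothetical tables. */
#include <stdio.h>
#include <stddef.h>

struct reg_table { unsigned int min_rev, max_rev, step, rows; };

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical tables: {min_rev, max_rev, step (bytes), rows} */
	struct reg_table tables[] = {
		{ 1, 3, 16, 1024 },   /* 128-bit entries */
		{ 2, 3,  8, 1024 },   /* 64-bit SRAM entries */
		{ 3, 3,  4,  128 },   /* 32-bit entries */
	};
	unsigned int revision = 2;   /* assumed running chip revision */
	size_t i, len = 0;

	for (i = 0; i < sizeof(tables) / sizeof(tables[0]); i++)
		if (revision >= tables[i].min_rev &&
		    revision <= tables[i].max_rev)
			len += tables[i].rows * min_sz(tables[i].step, 16);

	printf("register dump length: %zu bytes\n", len);
	return 0;
}
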
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 95770e1..0438dc9 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -142,7 +142,6 @@ struct siena_nic_data {
u32 fw_build;
struct efx_mcdi_iface mcdi;
int wol_filter_id;
- u8 ipv6_rss_key[40];
};
extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -208,6 +207,7 @@ extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx);
+extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len);
@@ -222,6 +222,9 @@ extern int efx_nic_test_registers(struct efx_nic *efx,
const struct efx_nic_register_test *regs,
size_t n_regs);
+extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
+extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
/**************************************************************************
*
* Falcon MAC stats
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e077bef..68813d1 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -91,9 +91,10 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx)
if (time_after(jiffies, timeout)) {
/* Some cables have EEPROMs that conflict with the
* PHY's on-board EEPROM so it cannot load firmware */
- EFX_ERR(efx, "If an SFP+ direct attach cable is"
- " connected, please check that it complies"
- " with the SFP+ specification\n");
+ netif_err(efx, hw, efx->net_dev,
+ "If an SFP+ direct attach cable is"
+ " connected, please check that it complies"
+ " with the SFP+ specification\n");
return -ETIMEDOUT;
}
msleep(QT2025C_HEARTB_WAIT);
@@ -145,7 +146,8 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
/* Bug 17689: occasionally heartbeat starts but firmware status
* code never progresses beyond 0x00. Try again, once, after
* restarting execution of the firmware image. */
- EFX_LOG(efx, "bashing QT2025C microcontroller\n");
+ netif_dbg(efx, hw, efx->net_dev,
+ "bashing QT2025C microcontroller\n");
qt2025c_restart_firmware(efx);
rc = qt2025c_wait_heartbeat(efx);
if (rc != 0)
@@ -165,11 +167,12 @@ static void qt2025c_firmware_id(struct efx_nic *efx)
for (i = 0; i < sizeof(firmware_id); i++)
firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
PCS_FW_PRODUCT_CODE_1 + i);
- EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
- (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
- firmware_id[3] >> 4, firmware_id[3] & 0xf,
- firmware_id[4], firmware_id[5],
- firmware_id[6], firmware_id[7], firmware_id[8]);
+ netif_info(efx, probe, efx->net_dev,
+ "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+ (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+ firmware_id[3] >> 4, firmware_id[3] & 0xf,
+ firmware_id[4], firmware_id[5],
+ firmware_id[6], firmware_id[7], firmware_id[8]);
phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
((firmware_id[3] & 0x0f) << 16) |
(firmware_id[4] << 8) | firmware_id[5];
@@ -198,7 +201,7 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx)
}
if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
- EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
+ netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
@@ -231,7 +234,8 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
reg = efx_mdio_read(efx, 1, 0xc319);
if ((reg & 0x0038) == phy_op_mode)
return 0;
- EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
+ netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
+ phy_op_mode);
/* This sequence replicates the register writes configured in the boot
* EEPROM (including the differences between board revisions), except
@@ -287,8 +291,9 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
/* Wait for the microcontroller to be ready again */
rc = qt2025c_wait_reset(efx);
if (rc < 0) {
- EFX_ERR(efx, "PHY microcontroller reset during mode switch "
- "timed out\n");
+ netif_err(efx, hw, efx->net_dev,
+ "PHY microcontroller reset during mode switch "
+ "timed out\n");
return rc;
}
@@ -324,7 +329,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
return 0;
fail:
- EFX_ERR(efx, "PHY reset timed out\n");
+ netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
return rc;
}
@@ -353,14 +358,15 @@ static int qt202x_phy_init(struct efx_nic *efx)
rc = qt202x_reset_phy(efx);
if (rc) {
- EFX_ERR(efx, "PHY init failed\n");
+ netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
return rc;
}
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
- EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
- devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
- efx_mdio_id_rev(devid));
+ netif_info(efx, probe, efx->net_dev,
+ "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
+ devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
+ efx_mdio_id_rev(devid));
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_firmware_id(efx);
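
qt2025c_firmware_id() reads nine product-code bytes over MDIO, prints them in the %xr%d v%d.%d.%d.%d form shown above and packs bytes 3-5 into phy_data->firmware_ver. A minimal sketch of that formatting and packing, with assumed register contents, follows (userspace illustration only).

/* Illustrative sketch: format and pack the nine PCS_FW_PRODUCT_CODE bytes
 * the same way the driver does, using made-up values. */
#include <stdio.h>

int main(void)
{
	unsigned char id[9] = { 0x20, 0x25, 2, 0x21, 3, 4, 10, 6, 17 };
	unsigned int firmware_ver;

	printf("QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
	       (id[0] << 8) | id[1], id[2],
	       id[3] >> 4, id[3] & 0xf, id[4], id[5],
	       id[6], id[7], id[8]);

	firmware_ver = ((id[3] & 0xf0) << 20) | ((id[3] & 0x0f) << 16) |
		       (id[4] << 8) | id[5];
	printf("packed firmware_ver = 0x%08x\n", firmware_ver);
	return 0;
}
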
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 9fb698e..799c461 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -101,6 +101,19 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
return PAGE_SIZE << efx->rx_buffer_order;
}
+static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
+ return __le32_to_cpup((const __le32 *)(buf->data - 4));
+#else
+ const u8 *data = (const u8 *)(buf->data - 4);
+ return ((u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24);
+#endif
+}
+
/**
* efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
*
@@ -348,10 +361,11 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
if (space < EFX_RX_BATCH)
goto out;
- EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
- " level %d to level %d using %s allocation\n",
- rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
- channel->rx_alloc_push_pages ? "page" : "skb");
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d using %s allocation\n",
+ rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
+ channel->rx_alloc_push_pages ? "page" : "skb");
do {
if (channel->rx_alloc_push_pages)
@@ -366,9 +380,10 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
}
} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
- EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
- "to level %d\n", rx_queue->queue,
- rx_queue->added_count - rx_queue->removed_count);
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", rx_queue->queue,
+ rx_queue->added_count - rx_queue->removed_count);
out:
if (rx_queue->notified_count != rx_queue->added_count)
@@ -402,10 +417,12 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
*discard = true;
if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
- EFX_ERR_RL(efx, " RX queue %d seriously overlength "
- "RX event (0x%x > 0x%x+0x%x). Leaking\n",
- rx_queue->queue, len, max_len,
- efx->type->rx_buffer_padding);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ " RX queue %d seriously overlength "
+ "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+ rx_queue->queue, len, max_len,
+ efx->type->rx_buffer_padding);
/* If this buffer was skb-allocated, then the meta
* data at the end of the skb will be trashed. So
* we have no choice but to leak the fragment.
@@ -413,8 +430,11 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
*leak_packet = (rx_buf->skb != NULL);
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
- EFX_ERR_RL(efx, " RX queue %d overlength RX event "
- "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ " RX queue %d overlength RX event "
+ "(0x%x > 0x%x)\n",
+ rx_queue->queue, len, max_len);
}
rx_queue->channel->n_rx_overlength++;
@@ -434,6 +454,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
/* Pass the skb/page into the LRO engine */
if (rx_buf->page) {
+ struct efx_nic *efx = channel->efx;
struct page *page = rx_buf->page;
struct sk_buff *skb;
@@ -446,6 +467,9 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
return;
}
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb->rxhash = efx_rx_buf_hash(rx_buf);
+
skb_shinfo(skb)->frags[0].page = page;
skb_shinfo(skb)->frags[0].page_offset =
efx_rx_buf_offset(rx_buf);
@@ -502,11 +526,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
efx_rx_packet__check_len(rx_queue, rx_buf, len,
&discard, &leak_packet);
- EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
- rx_queue->queue, index,
- (unsigned long long)rx_buf->dma_addr, len,
- (checksummed ? " [SUMMED]" : ""),
- (discard ? " [DISCARD]" : ""));
+ netif_vdbg(efx, rx_status, efx->net_dev,
+ "RX queue %d received id %x at %llx+%x %s%s\n",
+ rx_queue->queue, index,
+ (unsigned long long)rx_buf->dma_addr, len,
+ (checksummed ? " [SUMMED]" : ""),
+ (discard ? " [DISCARD]" : ""));
/* Discard packet, if instructed to do so */
if (unlikely(discard)) {
@@ -550,6 +575,9 @@ void __efx_rx_packet(struct efx_channel *channel,
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
+ rx_buf->data += efx->type->rx_buffer_hash_size;
+ rx_buf->len -= efx->type->rx_buffer_hash_size;
+
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
@@ -562,8 +590,12 @@ void __efx_rx_packet(struct efx_channel *channel,
if (rx_buf->skb) {
prefetch(skb_shinfo(rx_buf->skb));
+ skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
skb_put(rx_buf->skb, rx_buf->len);
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+
/* Move past the ethernet header. rx_buf->data still points
* at the ethernet header */
rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
@@ -621,7 +653,8 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
unsigned int rxq_size;
int rc;
- EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d\n", rx_queue->queue);
/* Allocate RX buffers */
rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
@@ -641,7 +674,8 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
unsigned int max_fill, trigger, limit;
- EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", rx_queue->queue);
/* Initialise ptr fields */
rx_queue->added_count = 0;
@@ -668,7 +702,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
int i;
struct efx_rx_buffer *rx_buf;
- EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", rx_queue->queue);
del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
@@ -684,7 +719,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
- EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", rx_queue->queue);
efx_nic_remove_rx(rx_queue);
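
With hash insertion enabled the NIC prepends rx_buffer_hash_size (0x10) bytes to each frame, and efx_rx_buf_hash() reads the 32-bit hash as a little-endian word four bytes before the adjusted data pointer, falling back to byte-by-byte assembly where unaligned loads are unsafe. A minimal sketch of the two equivalent decodings, with an assumed hash value, follows (userspace illustration only).

/* Illustrative sketch: read the little-endian 32-bit hash from the last
 * four bytes of the 16-byte prefix inserted ahead of the packet, both
 * byte-by-byte (always safe) and as a direct load (little-endian,
 * alignment-tolerant hosts only). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	unsigned char prefix[16] = { 0 };   /* rx_buffer_hash_size = 0x10 */
	unsigned char *data = prefix + sizeof(prefix);   /* start of frame */
	uint32_t byte_wise, direct;

	/* Assume the hash 0x12345678 sits in the last 4 prefix bytes, LE */
	prefix[12] = 0x78; prefix[13] = 0x56; prefix[14] = 0x34; prefix[15] = 0x12;

	byte_wise = (uint32_t)data[-4] | (uint32_t)data[-3] << 8 |
		    (uint32_t)data[-2] << 16 | (uint32_t)data[-1] << 24;

	memcpy(&direct, data - 4, sizeof(direct));   /* little-endian host assumed */

	printf("byte-wise 0x%08x, direct 0x%08x\n",
	       (unsigned int)byte_wise, (unsigned int)direct);
	return 0;
}
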
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 1f83404..85f015f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -123,7 +123,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
{
struct efx_channel *channel;
- EFX_LOG(efx, "testing interrupts\n");
+ netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
/* Reset interrupt flag */
@@ -142,16 +142,17 @@ static int efx_test_interrupts(struct efx_nic *efx,
efx_nic_generate_interrupt(efx);
/* Wait for arrival of test interrupt. */
- EFX_LOG(efx, "waiting for test interrupt\n");
+ netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
schedule_timeout_uninterruptible(HZ / 10);
if (efx->last_irq_cpu >= 0)
goto success;
- EFX_ERR(efx, "timed out waiting for interrupt\n");
+ netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
return -ETIMEDOUT;
success:
- EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
+ netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
+ INT_MODE(efx),
efx->last_irq_cpu);
tests->interrupt = 1;
return 0;
@@ -161,6 +162,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
+ struct efx_nic *efx = channel->efx;
unsigned int magic_count, count;
tests->eventq_dma[channel->channel] = -1;
@@ -185,29 +187,32 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
goto eventq_ok;
} while (++count < 2);
- EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n",
- channel->channel);
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d timed out waiting for event queue\n",
+ channel->channel);
/* See if interrupt arrived */
if (channel->efx->last_irq_cpu >= 0) {
- EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
- "during event queue test\n", channel->channel,
- raw_smp_processor_id());
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d saw interrupt on CPU%d "
+ "during event queue test\n", channel->channel,
+ raw_smp_processor_id());
tests->eventq_int[channel->channel] = 1;
}
/* Check to see if event was received even if interrupt wasn't */
efx_process_channel_now(channel);
if (channel->magic_count != magic_count) {
- EFX_ERR(channel->efx, "channel %d event was generated, but "
- "failed to trigger an interrupt\n", channel->channel);
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d event was generated, but "
+ "failed to trigger an interrupt\n", channel->channel);
tests->eventq_dma[channel->channel] = 1;
}
return -ETIMEDOUT;
eventq_ok:
- EFX_LOG(channel->efx, "channel %d event queue passed\n",
- channel->channel);
+ netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
+ channel->channel);
tests->eventq_dma[channel->channel] = 1;
tests->eventq_int[channel->channel] = 1;
tests->eventq_poll[channel->channel] = 1;
@@ -260,51 +265,57 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
/* Check that header exists */
if (pkt_len < sizeof(received->header)) {
- EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
- "test\n", pkt_len, LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw runt RX packet (length %d) in %s loopback "
+ "test\n", pkt_len, LOOPBACK_MODE(efx));
goto err;
}
/* Check that the ethernet header exists */
if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
- EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw non-loopback RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
goto err;
}
/* Check packet length */
if (pkt_len != sizeof(*payload)) {
- EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
- "%s loopback test\n", pkt_len, (int)sizeof(*payload),
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw incorrect RX packet length %d (wanted %d) in "
+ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+ LOOPBACK_MODE(efx));
goto err;
}
/* Check that IP header matches */
if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
- EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted IP header in %s loopback test\n",
+ LOOPBACK_MODE(efx));
goto err;
}
/* Check that msg and padding matches */
if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
- EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
goto err;
}
/* Check that iteration matches */
if (received->iteration != payload->iteration) {
- EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
- "%s loopback test\n", ntohs(received->iteration),
- ntohs(payload->iteration), LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw RX packet from iteration %d (wanted %d) in "
+ "%s loopback test\n", ntohs(received->iteration),
+ ntohs(payload->iteration), LOOPBACK_MODE(efx));
goto err;
}
/* Increase correct RX count */
- EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
- LOOPBACK_MODE(efx));
+ netif_vdbg(efx, drv, efx->net_dev,
+ "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
atomic_inc(&state->rx_good);
return;
@@ -312,10 +323,10 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
err:
#ifdef EFX_ENABLE_DEBUG
if (atomic_read(&state->rx_bad) == 0) {
- EFX_ERR(efx, "received packet:\n");
+ netif_err(efx, drv, efx->net_dev, "received packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
buf_ptr, pkt_len, 0);
- EFX_ERR(efx, "expected packet:\n");
+ netif_err(efx, drv, efx->net_dev, "expected packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
&state->payload, sizeof(state->payload), 0);
}
@@ -396,9 +407,11 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
netif_tx_unlock_bh(efx->net_dev);
if (rc != NETDEV_TX_OK) {
- EFX_ERR(efx, "TX queue %d could not transmit packet %d "
- "of %d in %s loopback test\n", tx_queue->queue,
- i + 1, state->packet_count, LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d could not transmit packet %d of "
+ "%d in %s loopback test\n", tx_queue->queue,
+ i + 1, state->packet_count,
+ LOOPBACK_MODE(efx));
/* Defer cleaning up the other skbs for the caller */
kfree_skb(skb);
@@ -454,20 +467,22 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
/* Don't free the skbs; they will be picked up on TX
* overflow or channel teardown.
*/
- EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
- "TX completion events in %s loopback test\n",
- tx_queue->queue, tx_done, state->packet_count,
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "TX completion events in %s loopback test\n",
+ tx_queue->queue, tx_done, state->packet_count,
+ LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Allow to fall through so we see the RX errors as well */
}
/* We may always be up to a flush away from our desired packet total */
if (rx_good != state->packet_count) {
- EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
- "received packets in %s loopback test\n",
- tx_queue->queue, rx_good, state->packet_count,
- LOOPBACK_MODE(efx));
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "received packets in %s loopback test\n",
+ tx_queue->queue, rx_good, state->packet_count,
+ LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Fall through */
}
@@ -499,9 +514,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
return -ENOMEM;
state->flush = false;
- EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
- "packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
- state->packet_count);
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d testing %s loopback with %d packets\n",
+ tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
efx_iterate_state(efx);
begin_rc = efx_begin_loopback(tx_queue);
@@ -525,9 +541,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
}
}
- EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length "
- "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
- state->packet_count);
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d passed %s loopback test with a burst length "
+ "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
return 0;
}
@@ -602,15 +619,17 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
rc = __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
if (rc) {
- EFX_ERR(efx, "unable to move into %s loopback\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "unable to move into %s loopback\n",
+ LOOPBACK_MODE(efx));
goto out;
}
rc = efx_wait_for_link(efx);
if (rc) {
- EFX_ERR(efx, "loopback %s never came up\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "loopback %s never came up\n",
+ LOOPBACK_MODE(efx));
goto out;
}
@@ -718,7 +737,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
rc_reset = rc;
if (rc_reset) {
- EFX_ERR(efx, "Unable to recover from chip test\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to recover from chip test\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset;
}
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f2b1e61..3fab030 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -118,10 +118,11 @@ static int siena_probe_port(struct efx_nic *efx)
MC_CMD_MAC_NSTATS * sizeof(u64));
if (rc)
return rc;
- EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
- (u64)efx->stats_buffer.dma_addr,
- efx->stats_buffer.addr,
- (u64)virt_to_phys(efx->stats_buffer.addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
@@ -216,7 +217,8 @@ static int siena_probe_nic(struct efx_nic *efx)
efx->nic_data = nic_data;
if (efx_nic_fpga_ver(efx) != 0) {
- EFX_ERR(efx, "Siena FPGA not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Siena FPGA not supported\n");
rc = -ENODEV;
goto fail1;
}
@@ -233,8 +235,8 @@ static int siena_probe_nic(struct efx_nic *efx)
rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
if (rc) {
- EFX_ERR(efx, "Failed to read MCPU firmware version - "
- "rc %d\n", rc);
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read MCPU firmware version - rc %d\n", rc);
goto fail1; /* MCPU absent? */
}
@@ -242,17 +244,19 @@ static int siena_probe_nic(struct efx_nic *efx)
* filter settings. We must do this before we reset the NIC */
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
if (rc) {
- EFX_ERR(efx, "Unable to register driver with MCPU\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
goto fail2;
}
if (already_attached)
/* Not a fatal error */
- EFX_ERR(efx, "Host already registered with MCPU\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
/* Now we can reset the NIC */
rc = siena_reset_hw(efx, RESET_TYPE_ALL);
if (rc) {
- EFX_ERR(efx, "failed to reset NIC\n");
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
}
@@ -264,24 +268,23 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
- EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
- (unsigned long long)efx->irq_status.dma_addr,
- efx->irq_status.addr,
- (unsigned long long)virt_to_phys(efx->irq_status.addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (unsigned long long)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (unsigned long long)virt_to_phys(efx->irq_status.addr));
/* Read in the non-volatile configuration */
rc = siena_probe_nvconfig(efx);
if (rc == -EINVAL) {
- EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
+ netif_err(efx, probe, efx->net_dev,
+ "NVRAM is invalid therefore using defaults\n");
efx->phy_type = PHY_TYPE_NONE;
efx->mdio.prtad = MDIO_PRTAD_NONE;
} else if (rc) {
goto fail5;
}
- get_random_bytes(&nic_data->ipv6_rss_key,
- sizeof(nic_data->ipv6_rss_key));
-
return 0;
fail5:
@@ -301,7 +304,6 @@ fail1:
*/
static int siena_init_nic(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
efx_oword_t temp;
int rc;
@@ -326,25 +328,36 @@ static int siena_init_nic(struct efx_nic *efx)
efx_reado(efx, &temp, FR_AZ_RX_CFG);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
+ /* Enable hash insertion. This is broken for the 'Falcon' hash
+ * if IPv6 hashing is also enabled, so also select Toeplitz
+ * TCP/IPv4 and IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
/* Enable IPv6 RSS */
- BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
- memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
- memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
- memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
/* No MCDI operation has been defined to set thresholds */
- EFX_ERR(efx, "ignoring RX flow control thresholds\n");
+ netif_err(efx, hw, efx->net_dev,
+ "ignoring RX flow control thresholds\n");
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -565,7 +578,8 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
return 0;
fail:
- EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc);
+ netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
+ __func__, type, rc);
return rc;
}
@@ -628,6 +642,7 @@ struct efx_nic_type siena_a0_nic_type = {
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
@@ -635,6 +650,7 @@ struct efx_nic_type siena_a0_nic_type = {
* channels */
.tx_dc_base = 0x88000,
.rx_dc_base = 0x68000,
- .offload_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH),
.reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
};
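
siena_init_nic() now loads the shared efx->rx_hash_key into hardware in chunks: the first 16 bytes into FR_BZ_RX_RSS_TKEY for IPv4, then 16 + 16 + 8 bytes across the three IPv6 RSS registers, with the BUILD_BUG_ON checking the key covers 2*16 bytes plus TKEY_HI_WIDTH/8. A minimal sketch of splitting a 40-byte key that way follows (userspace illustration; the 8-byte high-word width is an assumption taken from the /8 expression above).

/* Illustrative sketch: split a 40-byte RSS hash key into the 16 + 16 + 8
 * byte chunks that the three IPv6 RSS registers take. */
#include <stdio.h>
#include <string.h>

#define KEY_LEN       40
#define REG_LEN       16   /* one 128-bit oword */
#define TKEY_HI_BYTES 8    /* assumed FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 */

int main(void)
{
	unsigned char key[KEY_LEN], reg1[REG_LEN], reg2[REG_LEN], reg3[REG_LEN];
	int i;

	for (i = 0; i < KEY_LEN; i++)
		key[i] = (unsigned char)i;   /* stand-in for a random key */

	memset(reg3, 0, sizeof(reg3));
	memcpy(reg1, key, REG_LEN);                      /* bytes  0..15 */
	memcpy(reg2, key + REG_LEN, REG_LEN);            /* bytes 16..31 */
	memcpy(reg3, key + 2 * REG_LEN, TKEY_HI_BYTES);  /* bytes 32..39 */

	printf("REG1 starts 0x%02x, REG2 starts 0x%02x, REG3 starts 0x%02x\n",
	       reg1[0], reg2[0], reg3[0]);
	return 0;
}
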
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f21efe7..6791be9 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -228,7 +228,8 @@ int sft9001_wait_boot(struct efx_nic *efx)
boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
PCS_BOOT_STATUS_REG);
if (boot_stat >= 0) {
- EFX_LOG(efx, "PHY boot status = %#x\n", boot_stat);
+ netif_dbg(efx, hw, efx->net_dev,
+ "PHY boot status = %#x\n", boot_stat);
switch (boot_stat &
((1 << PCS_BOOT_FATAL_ERROR_LBN) |
(3 << PCS_BOOT_PROGRESS_LBN) |
@@ -463,10 +464,11 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
} else {
reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
- EFX_ERR(efx, "appears to be plugged into a port"
- " that is not 10GBASE-T capable. The PHY"
- " supports 10GBASE-T ONLY, so no link can"
- " be established\n");
+ netif_err(efx, link, efx->net_dev,
+ "appears to be plugged into a port"
+ " that is not 10GBASE-T capable. The PHY"
+ " supports 10GBASE-T ONLY, so no link can"
+ " be established\n");
}
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG, reg);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 6bb12a8..c6942da 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -42,7 +42,7 @@ void efx_stop_queue(struct efx_channel *channel)
return;
spin_lock_bh(&channel->tx_stop_lock);
- EFX_TRACE(efx, "stop TX queue\n");
+ netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
atomic_inc(&channel->tx_stop_count);
netif_tx_stop_queue(
@@ -64,7 +64,7 @@ void efx_wake_queue(struct efx_channel *channel)
local_bh_disable();
if (atomic_dec_and_lock(&channel->tx_stop_count,
&channel->tx_stop_lock)) {
- EFX_TRACE(efx, "waking TX queue\n");
+ netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
netif_tx_wake_queue(
netdev_get_tx_queue(
efx->net_dev,
@@ -94,8 +94,9 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
if (buffer->skb) {
dev_kfree_skb_any((struct sk_buff *) buffer->skb);
buffer->skb = NULL;
- EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
- "complete\n", tx_queue->queue, read_ptr);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
}
}
@@ -300,9 +301,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
pci_err:
- EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
- "fragments for DMA\n", tx_queue->queue, skb->len,
- skb_shinfo(skb)->nr_frags + 1);
+ netif_err(efx, tx_err, efx->net_dev,
+ " TX queue %d could not map skb with %d bytes %d "
+ "fragments for DMA\n", tx_queue->queue, skb->len,
+ skb_shinfo(skb)->nr_frags + 1);
/* Mark the packet as transmitted, and free the SKB ourselves */
dev_kfree_skb_any(skb);
@@ -354,9 +356,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (unlikely(buffer->len == 0)) {
- EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
- "completion id %x\n", tx_queue->queue,
- read_ptr);
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %x\n",
+ tx_queue->queue, read_ptr);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
@@ -431,7 +433,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
unsigned int txq_size;
int i, rc;
- EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
+ netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
+ tx_queue->queue);
/* Allocate software ring */
txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
@@ -456,7 +459,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
- EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
@@ -488,7 +492,8 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
- EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
/* Flush TX queue, remove descriptor ring */
efx_nic_fini_tx(tx_queue);
@@ -507,7 +512,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
- EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue);
kfree(tx_queue->buffer);
@@ -639,8 +645,8 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
if (base_kva == NULL) {
- EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
- " headers\n");
+ netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
+ "Unable to allocate page for TSO headers\n");
return -ENOMEM;
}
@@ -1124,7 +1130,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
return NETDEV_TX_OK;
mem_err:
- EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
+ netif_err(efx, tx_err, efx->net_dev,
+ "Out of memory for TSO headers, or PCI mapping error\n");
dev_kfree_skb_any(skb);
goto unwind;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 2111c7b..c762c6a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -717,11 +717,24 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
+/* Enable Rx/Tx */
+static void sky2_enable_rx_tx(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 reg;
+
+ reg = gma_read16(hw, port, GM_GP_CTRL);
+ reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+}
+
/* Force a renegotiation */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
spin_lock_bh(&sky2->phy_lock);
sky2_phy_init(sky2->hw, sky2->port);
+ sky2_enable_rx_tx(sky2);
spin_unlock_bh(&sky2->phy_lock);
}
@@ -2040,7 +2053,6 @@ static void sky2_link_up(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u16 reg;
static const char *fc_name[] = {
[FC_NONE] = "none",
[FC_TX] = "tx",
@@ -2048,10 +2060,7 @@ static void sky2_link_up(struct sky2_port *sky2)
[FC_BOTH] = "both",
};
- /* enable Rx/Tx */
- reg = gma_read16(hw, port, GM_GP_CTRL);
- reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
- gma_write16(hw, port, GM_GP_CTRL, reg);
+ sky2_enable_rx_tx(sky2);
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
@@ -4179,17 +4188,13 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
static int sky2_set_flags(struct net_device *dev, u32 data)
{
struct sky2_port *sky2 = netdev_priv(dev);
+ u32 supported =
+ (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
+ int rc;
- if (data & ~ETH_FLAG_RXHASH)
- return -EOPNOTSUPP;
-
- if (data & ETH_FLAG_RXHASH) {
- if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
- return -EINVAL;
-
- dev->features |= NETIF_F_RXHASH;
- } else
- dev->features &= ~NETIF_F_RXHASH;
+ rc = ethtool_op_set_flags(dev, data, supported);
+ if (rc)
+ return rc;
rx_set_rss(dev);
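
sky2_set_flags() now builds a supported mask (ETH_FLAG_RXHASH only when the hardware's RSS is usable) and defers the bit checking and feature toggling to ethtool_op_set_flags(). A minimal sketch of that reject-unsupported-then-copy pattern follows; the flag value and the helper are stand-ins, not the kernel's definitions.

/* Illustrative sketch: reject any requested bit outside the driver's
 * supported mask, otherwise copy the bits into the feature set. */
#include <stdio.h>

#define FLAG_RXHASH (1u << 0)   /* stand-in for ETH_FLAG_RXHASH */

static int set_flags(unsigned int *features, unsigned int data,
		     unsigned int supported)
{
	if (data & ~supported)
		return -1;        /* -EOPNOTSUPP in the kernel */
	*features = (*features & ~supported) | data;
	return 0;
}

int main(void)
{
	unsigned int features = 0;
	unsigned int rss_broken = 0;   /* assumed: this NIC's RSS works */
	unsigned int supported = rss_broken ? 0 : FLAG_RXHASH;

	printf("enable rxhash: %d, features now 0x%x\n",
	       set_flags(&features, FLAG_RXHASH, supported), features);
	return 0;
}
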
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 538148a..dc32a62 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3215,6 +3215,8 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
__func__, __LINE__, (u32) skb);
if (skb) {
skb->data = skb->head + NET_SKB_PAD;
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
__skb_queue_head(&ugeth->rx_recycle, skb);
}
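
The ucc_geth recycle path resets skb->len and the tail pointer before queueing the skb for reuse, so the next receive fills it from the start instead of appending past stale data. A minimal sketch of why a reused buffer needs its length and write offset cleared follows (userspace illustration only).

/* Illustrative sketch: a recycled buffer's length and tail index must be
 * reset before reuse, mirroring skb->len = 0 / skb_reset_tail_pointer(). */
#include <stdio.h>
#include <string.h>

struct buf {
	char data[64];
	unsigned int len;    /* bytes currently used */
	unsigned int tail;   /* next write offset */
};

static void recycle(struct buf *b)
{
	b->len = 0;          /* skb->len = 0 */
	b->tail = 0;         /* skb_reset_tail_pointer() */
}

static void fill(struct buf *b, const char *s)
{
	size_t n = strlen(s);
	memcpy(b->data + b->tail, s, n);
	b->tail += n;
	b->len += n;
}

int main(void)
{
	struct buf b = { .len = 0, .tail = 0 };

	fill(&b, "first packet");
	recycle(&b);             /* without this, the next fill would append */
	fill(&b, "second");
	printf("len after reuse: %u\n", b.len);   /* 6, not 18 */
	return 0;
}
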
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index c8570b0..39422f7 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1334,7 +1334,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
/* check for port already opened, if not set the termios */
serial->open_count++;
if (serial->open_count == 1) {
- tty->low_latency = 1;
serial->rx_state = RX_IDLE;
/* Force default termio settings */
_hso_serial_set_termios(tty, NULL);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 974d17f..6710f09 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -21,11 +21,11 @@
* behaves. Pegasus II support added since this version.
* TODO: suppressing HCD warnings spewage on disconnect.
* v0.4.13 Ethernet address is now set at probe(), not at open()
- * time as this seems to break dhcpd.
+ * time as this seems to break dhcpd.
* v0.5.0 branch to 2.5.x kernels
* v0.5.1 ethtool support added
* v0.5.5 rx socket buffers are in a pool and the their allocation
- * is out of the interrupt routine.
+ * is out of the interrupt routine.
*/
#include <linux/sched.h>
@@ -55,9 +55,9 @@ static const char driver_name[] = "pegasus";
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
-static int loopback = 0;
-static int mii_mode = 0;
-static char *devid=NULL;
+static int loopback;
+static int mii_mode;
+static char *devid;
static struct usb_eth_dev usb_dev_id[] = {
#define PEGASUS_DEV(pn, vid, pid, flags) \
@@ -102,8 +102,8 @@ MODULE_PARM_DESC(devid, "The format is: 'DEV_name:VendorID:DeviceID:Flags'");
/* use ethtool to change the level for any given device */
static int msg_level = -1;
-module_param (msg_level, int, 0);
-MODULE_PARM_DESC (msg_level, "Override default message level");
+module_param(msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Override default message level");
MODULE_DEVICE_TABLE(usb, pegasus_ids);
static const struct net_device_ops pegasus_netdev_ops;
@@ -141,7 +141,7 @@ static void ctrl_callback(struct urb *urb)
wake_up(&pegasus->ctrl_wait);
}
-static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
+static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
void *data)
{
int ret;
@@ -196,7 +196,7 @@ out:
return ret;
}
-static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
void *data)
{
int ret;
@@ -248,7 +248,7 @@ out:
return ret;
}
-static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
+static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
int ret;
char *tmp;
@@ -299,7 +299,7 @@ out:
return ret;
}
-static int update_eth_regs_async(pegasus_t * pegasus)
+static int update_eth_regs_async(pegasus_t *pegasus)
{
int ret;
@@ -326,7 +326,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
}
/* Returns 0 on success, error on failure */
-static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
+static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
{
int i;
__u8 data[4] = { phy, 0, 0, indx };
@@ -334,7 +334,7 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
int ret;
set_register(pegasus, PhyCtrl, 0);
- set_registers(pegasus, PhyAddr, sizeof (data), data);
+ set_registers(pegasus, PhyAddr, sizeof(data), data);
set_register(pegasus, PhyCtrl, (indx | PHY_READ));
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, PhyCtrl, 1, data);
@@ -366,7 +366,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
return (int)res;
}
-static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
+static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 regd)
{
int i;
__u8 data[4] = { phy, 0, 0, indx };
@@ -402,7 +402,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
write_mii_word(pegasus, phy_id, loc, val);
}
-static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
+static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
int i;
__u8 tmp;
@@ -433,7 +433,7 @@ fail:
}
#ifdef PEGASUS_WRITE_EEPROM
-static inline void enable_eprom_write(pegasus_t * pegasus)
+static inline void enable_eprom_write(pegasus_t *pegasus)
{
__u8 tmp;
int ret;
@@ -442,7 +442,7 @@ static inline void enable_eprom_write(pegasus_t * pegasus)
set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE);
}
-static inline void disable_eprom_write(pegasus_t * pegasus)
+static inline void disable_eprom_write(pegasus_t *pegasus)
{
__u8 tmp;
int ret;
@@ -452,7 +452,7 @@ static inline void disable_eprom_write(pegasus_t * pegasus)
set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE);
}
-static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
+static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
{
int i;
__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
@@ -484,7 +484,7 @@ fail:
}
#endif /* PEGASUS_WRITE_EEPROM */
-static inline void get_node_id(pegasus_t * pegasus, __u8 * id)
+static inline void get_node_id(pegasus_t *pegasus, __u8 *id)
{
int i;
__u16 w16;
@@ -495,7 +495,7 @@ static inline void get_node_id(pegasus_t * pegasus, __u8 * id)
}
}
-static void set_ethernet_addr(pegasus_t * pegasus)
+static void set_ethernet_addr(pegasus_t *pegasus)
{
__u8 node_id[6];
@@ -503,12 +503,12 @@ static void set_ethernet_addr(pegasus_t * pegasus)
get_registers(pegasus, 0x10, sizeof(node_id), node_id);
} else {
get_node_id(pegasus, node_id);
- set_registers(pegasus, EthID, sizeof (node_id), node_id);
+ set_registers(pegasus, EthID, sizeof(node_id), node_id);
}
- memcpy(pegasus->net->dev_addr, node_id, sizeof (node_id));
+ memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id));
}
-static inline int reset_mac(pegasus_t * pegasus)
+static inline int reset_mac(pegasus_t *pegasus)
{
__u8 data = 0x8;
int i;
@@ -563,7 +563,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
data[1] = 0;
data[2] = (loopback & 1) ? 0x09 : 0x01;
- memcpy(pegasus->eth_regs, data, sizeof (data));
+ memcpy(pegasus->eth_regs, data, sizeof(data));
ret = set_registers(pegasus, EthCtrl0, 3, data);
if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS ||
@@ -577,7 +577,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
return ret;
}
-static void fill_skb_pool(pegasus_t * pegasus)
+static void fill_skb_pool(pegasus_t *pegasus)
{
int i;
@@ -595,7 +595,7 @@ static void fill_skb_pool(pegasus_t * pegasus)
}
}
-static void free_skb_pool(pegasus_t * pegasus)
+static void free_skb_pool(pegasus_t *pegasus)
{
int i;
@@ -667,11 +667,11 @@ static void read_bulk_callback(struct urb *urb)
netif_dbg(pegasus, rx_err, net,
"RX packet error %x\n", rx_status);
pegasus->stats.rx_errors++;
- if (rx_status & 0x06) // long or runt
+ if (rx_status & 0x06) /* long or runt */
pegasus->stats.rx_length_errors++;
if (rx_status & 0x08)
pegasus->stats.rx_crc_errors++;
- if (rx_status & 0x10) // extra bits
+ if (rx_status & 0x10) /* extra bits */
pegasus->stats.rx_frame_errors++;
goto goon;
}
@@ -748,9 +748,8 @@ static void rx_fixup(unsigned long data)
if (pegasus->flags & PEGASUS_RX_URB_FAIL)
if (pegasus->rx_skb)
goto try_again;
- if (pegasus->rx_skb == NULL) {
+ if (pegasus->rx_skb == NULL)
pegasus->rx_skb = pull_skb(pegasus);
- }
if (pegasus->rx_skb == NULL) {
netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n");
tasklet_schedule(&pegasus->rx_tl);
@@ -835,7 +834,7 @@ static void intr_callback(struct urb *urb)
}
if (urb->actual_length >= 6) {
- u8 * d = urb->transfer_buffer;
+ u8 *d = urb->transfer_buffer;
/* byte 0 == tx_status1, reg 2B */
if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL
@@ -918,14 +917,14 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
return &((pegasus_t *) netdev_priv(dev))->stats;
}
-static inline void disable_net_traffic(pegasus_t * pegasus)
+static inline void disable_net_traffic(pegasus_t *pegasus)
{
__le16 tmp = cpu_to_le16(0);
set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
}
-static inline void get_interrupt_interval(pegasus_t * pegasus)
+static inline void get_interrupt_interval(pegasus_t *pegasus)
{
u16 data;
u8 interval;
@@ -961,7 +960,7 @@ static void set_carrier(struct net_device *net)
netif_carrier_off(net);
}
-static void free_all_urbs(pegasus_t * pegasus)
+static void free_all_urbs(pegasus_t *pegasus)
{
usb_free_urb(pegasus->intr_urb);
usb_free_urb(pegasus->tx_urb);
@@ -969,7 +968,7 @@ static void free_all_urbs(pegasus_t * pegasus)
usb_free_urb(pegasus->ctrl_urb);
}
-static void unlink_all_urbs(pegasus_t * pegasus)
+static void unlink_all_urbs(pegasus_t *pegasus)
{
usb_kill_urb(pegasus->intr_urb);
usb_kill_urb(pegasus->tx_urb);
@@ -977,12 +976,11 @@ static void unlink_all_urbs(pegasus_t * pegasus)
usb_kill_urb(pegasus->ctrl_urb);
}
-static int alloc_urbs(pegasus_t * pegasus)
+static int alloc_urbs(pegasus_t *pegasus)
{
pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pegasus->ctrl_urb) {
+ if (!pegasus->ctrl_urb)
return 0;
- }
pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pegasus->rx_urb) {
usb_free_urb(pegasus->ctrl_urb);
@@ -1019,7 +1017,7 @@ static int pegasus_open(struct net_device *net)
return -ENOMEM;
res = set_registers(pegasus, EthID, 6, net->dev_addr);
-
+
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
pegasus->rx_skb->data, PEGASUS_MTU + 8,
@@ -1033,7 +1031,7 @@ static int pegasus_open(struct net_device *net)
usb_fill_int_urb(pegasus->intr_urb, pegasus->usb,
usb_rcvintpipe(pegasus->usb, 3),
- pegasus->intr_buff, sizeof (pegasus->intr_buff),
+ pegasus->intr_buff, sizeof(pegasus->intr_buff),
intr_callback, pegasus, pegasus->intr_interval);
if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) {
if (res == -ENODEV)
@@ -1076,9 +1074,9 @@ static void pegasus_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
pegasus_t *pegasus = netdev_priv(dev);
- strncpy(info->driver, driver_name, sizeof (info->driver) - 1);
- strncpy(info->version, DRIVER_VERSION, sizeof (info->version) - 1);
- usb_make_path(pegasus->usb, info->bus_info, sizeof (info->bus_info));
+ strncpy(info->driver, driver_name, sizeof(info->driver) - 1);
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
+ usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
}
/* also handles three patterns of some kind in hardware */
@@ -1098,7 +1096,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
pegasus_t *pegasus = netdev_priv(dev);
u8 reg78 = 0x04;
-
+
if (wol->wolopts & ~WOL_SUPPORTED)
return -EINVAL;
@@ -1118,7 +1116,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static inline void pegasus_reset_wol(struct net_device *dev)
{
struct ethtool_wolinfo wol;
-
+
memset(&wol, 0, sizeof wol);
(void) pegasus_set_wol(dev, &wol);
}
@@ -1178,7 +1176,7 @@ static const struct ethtool_ops ops = {
static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
{
- __u16 *data = (__u16 *) & rq->ifr_ifru;
+ __u16 *data = (__u16 *) &rq->ifr_ifru;
pegasus_t *pegasus = netdev_priv(net);
int res;
@@ -1223,7 +1221,7 @@ static void pegasus_set_multicast(struct net_device *net)
ctrl_callback(pegasus->ctrl_urb);
}
-static __u8 mii_phy_probe(pegasus_t * pegasus)
+static __u8 mii_phy_probe(pegasus_t *pegasus)
{
int i;
__u16 tmp;
@@ -1239,10 +1237,10 @@ static __u8 mii_phy_probe(pegasus_t * pegasus)
return 0xff;
}
-static inline void setup_pegasus_II(pegasus_t * pegasus)
+static inline void setup_pegasus_II(pegasus_t *pegasus)
{
__u8 data = 0xa5;
-
+
set_register(pegasus, Reg1d, 0);
set_register(pegasus, Reg7b, 1);
mdelay(100);
@@ -1254,16 +1252,15 @@ static inline void setup_pegasus_II(pegasus_t * pegasus)
set_register(pegasus, 0x83, data);
get_registers(pegasus, 0x83, 1, &data);
- if (data == 0xa5) {
+ if (data == 0xa5)
pegasus->chip = 0x8513;
- } else {
+ else
pegasus->chip = 0;
- }
set_register(pegasus, 0x80, 0xc0);
set_register(pegasus, 0x83, 0xff);
set_register(pegasus, 0x84, 0x01);
-
+
if (pegasus->features & HAS_HOME_PNA && mii_mode)
set_register(pegasus, Reg81, 6);
else
@@ -1272,7 +1269,7 @@ static inline void setup_pegasus_II(pegasus_t * pegasus)
static int pegasus_count;
-static struct workqueue_struct *pegasus_workqueue = NULL;
+static struct workqueue_struct *pegasus_workqueue;
#define CARRIER_CHECK_DELAY (2 * HZ)
static void check_carrier(struct work_struct *work)
@@ -1367,7 +1364,7 @@ static int pegasus_probe(struct usb_interface *intf,
pegasus->mii.phy_id_mask = 0x1f;
pegasus->mii.reg_num_mask = 0x1f;
spin_lock_init(&pegasus->rx_pool_lock);
- pegasus->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
+ pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
pegasus->features = usb_dev_id[dev_index].private;
@@ -1442,11 +1439,11 @@ static void pegasus_disconnect(struct usb_interface *intf)
pegasus_dec_workqueue();
}
-static int pegasus_suspend (struct usb_interface *intf, pm_message_t message)
+static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
{
struct pegasus *pegasus = usb_get_intfdata(intf);
-
- netif_device_detach (pegasus->net);
+
+ netif_device_detach(pegasus->net);
cancel_delayed_work(&pegasus->carrier_check);
if (netif_running(pegasus->net)) {
usb_kill_urb(pegasus->rx_urb);
@@ -1455,11 +1452,11 @@ static int pegasus_suspend (struct usb_interface *intf, pm_message_t message)
return 0;
}
-static int pegasus_resume (struct usb_interface *intf)
+static int pegasus_resume(struct usb_interface *intf)
{
struct pegasus *pegasus = usb_get_intfdata(intf);
- netif_device_attach (pegasus->net);
+ netif_device_attach(pegasus->net);
if (netif_running(pegasus->net)) {
pegasus->rx_urb->status = 0;
pegasus->rx_urb->actual_length = 0;
@@ -1498,8 +1495,8 @@ static struct usb_driver pegasus_driver = {
static void __init parse_id(char *id)
{
- unsigned int vendor_id=0, device_id=0, flags=0, i=0;
- char *token, *name=NULL;
+ unsigned int vendor_id = 0, device_id = 0, flags = 0, i = 0;
+ char *token, *name = NULL;
if ((token = strsep(&id, ":")) != NULL)
name = token;
@@ -1510,14 +1507,14 @@ static void __init parse_id(char *id)
device_id = simple_strtoul(token, NULL, 16);
flags = simple_strtoul(id, NULL, 16);
pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n",
- driver_name, name, vendor_id, device_id, flags);
+ driver_name, name, vendor_id, device_id, flags);
if (vendor_id > 0x10000 || vendor_id == 0)
return;
if (device_id > 0x10000 || device_id == 0)
return;
- for (i=0; usb_dev_id[i].name; i++);
+ for (i = 0; usb_dev_id[i].name; i++);
usb_dev_id[i].name = name;
usb_dev_id[i].vendor = vendor_id;
usb_dev_id[i].device = device_id;
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index 29f5211..65b78b3 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -68,7 +68,7 @@ enum pegasus_registers {
EpromData = 0x21, /* 0x21 low, 0x22 high byte */
EpromCtrl = 0x23,
PhyAddr = 0x25,
- PhyData = 0x26, /* 0x26 low, 0x27 high byte */
+ PhyData = 0x26, /* 0x26 low, 0x27 high byte */
PhyCtrl = 0x28,
UsbStst = 0x2a,
EthTxStat0 = 0x2b,
@@ -154,162 +154,162 @@ struct usb_eth_dev {
#else /* PEGASUS_DEV */
-PEGASUS_DEV( "3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c,
- DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
-PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104,
- DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004,
- DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007,
- DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "ADMtek ADM8511 \"Pegasus II\" USB Ethernet",
+PEGASUS_DEV("3Com USB Ethernet 3C460B", VENDOR_3COM, 0x4601,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("ATEN USB Ethernet UC-110T", VENDOR_ATEN, 0x2007,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x110c,
+ DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
+PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4104,
+ DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4004,
+ DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("USB HPNA/Ethernet", VENDOR_ABOCOM, 0x4007,
+ DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4102,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x4002,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400b,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x400c,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0xabc1,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("USB 10/100 Fast Ethernet", VENDOR_ABOCOM, 0x200c,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Accton USB 10/100 Ethernet Adapter", VENDOR_ACCTON, 0x1046,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("SpeedStream USB 10/100 Ethernet", VENDOR_ACCTON, 0x5046,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Philips USB 10/100 Ethernet", VENDOR_ACCTON, 0xb004,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("ADMtek ADM8511 \"Pegasus II\" USB Ethernet",
VENDOR_ADMTEK, 0x8511,
- DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
-PEGASUS_DEV( "ADMtek ADM8513 \"Pegasus II\" USB Ethernet",
+ DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
+PEGASUS_DEV("ADMtek ADM8513 \"Pegasus II\" USB Ethernet",
VENDOR_ADMTEK, 0x8513,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet",
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("ADMtek ADM8515 \"Pegasus II\" USB-2.0 Ethernet",
VENDOR_ADMTEK, 0x8515,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)",
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("ADMtek AN986 \"Pegasus\" USB Ethernet (evaluation board)",
VENDOR_ADMTEK, 0x0986,
- DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "AN986A USB MAC", VENDOR_ADMTEK, 1986,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
- DEFAULT_GPIO_RESET | PEGASUS_II )
+ DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("AN986A USB MAC", VENDOR_ADMTEK, 1986,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
/*
* Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors
* with the same product IDs by checking the device class too.
*/
-PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987,
- DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4002,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4102,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x400b,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x200c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003,
- DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "D-Link DSB-650", VENDOR_DLINK, 0xabc1,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002,
- DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
-PEGASUS_DEV( "ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM, 0x4010,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "EasiDock Ethernet", VENDOR_MOBILITY, 0x0304,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "HP hn210c Ethernet USB", VENDOR_HP, 0x811c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
+PEGASUS_DEV_CLASS("Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Belkin F5U122 10/100 USB Ethernet", VENDOR_BELKIN, 0x0122,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Billionton USBLP-100", VENDOR_BILLIONTON, 0x0987,
+ DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("iPAQ Networking 10/100 USB", VENDOR_COMPAQ, 0x8511,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4002,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x4102,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x400b,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("D-Link DSB-650TX", VENDOR_DLINK, 0x200c,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("D-Link DSB-650TX(PNA)", VENDOR_DLINK, 0x4003,
+ DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("D-Link DSB-650", VENDOR_DLINK, 0xabc1,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002,
+ DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA)
+PEGASUS_DEV("ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM, 0x4010,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("EasiDock Ethernet", VENDOR_MOBILITY, 0x0304,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000,
DEFAULT_GPIO_RESET)
-PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005,
- DEFAULT_GPIO_RESET | PEGASUS_II)
-PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x2202,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2203,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Linksys USB100TX", VENDOR_LINKSYS, 0x2204,
- DEFAULT_GPIO_RESET | HAS_HOME_PNA )
-PEGASUS_DEV( "Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206,
- DEFAULT_GPIO_RESET | PEGASUS_II)
-PEGASUS_DEV( "Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Linksys USB10TX", VENDOR_LINKSYS, 0x200c,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Microsoft MN-110", VENDOR_MICROSOFT, 0x007a,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "NETGEAR FA101", VENDOR_NETGEAR, 0x1020,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "OCT Inc.", VENDOR_OCT, 0x0109,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "OCT USB TO Ethernet", VENDOR_OCT, 0x0901,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "SMC 202 USB Ethernet", VENDOR_SMC, 0x0200,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201,
- DEFAULT_GPIO_RESET | PEGASUS_II)
-PEGASUS_DEV( "SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100,
- DEFAULT_GPIO_RESET )
-PEGASUS_DEV( "SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110,
- DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001,
- DEFAULT_GPIO_RESET | PEGASUS_II )
+PEGASUS_DEV("GIGABYTE GN-BR402W Wireless Router", VENDOR_GIGABYTE, 0x8002,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Hawking UF100 10/100 Ethernet", VENDOR_HAWKING, 0x400c,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("HP hn210c Ethernet USB", VENDOR_HP, 0x811c,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("LANEED USB Ethernet LD-USBL/TX", VENDOR_LANEED, 0x4005,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x400b,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("LANEED USB Ethernet LD-USB/T", VENDOR_LANEED, 0xabc1,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x200c,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Linksys USB10TX", VENDOR_LINKSYS, 0x2202,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Linksys USB100TX", VENDOR_LINKSYS, 0x2203,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Linksys USB100TX", VENDOR_LINKSYS, 0x2204,
+ DEFAULT_GPIO_RESET | HAS_HOME_PNA)
+PEGASUS_DEV("Linksys USB10T Ethernet Adapter", VENDOR_LINKSYS, 0x2206,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Linksys USBVPN1", VENDOR_LINKSYS2, 0x08b4,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("Linksys USB USB100TX", VENDOR_LINKSYS, 0x400b,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Linksys USB10TX", VENDOR_LINKSYS, 0x200c,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0001,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("MELCO/BUFFALO LUA-TX", VENDOR_MELCO, 0x0005,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("MELCO/BUFFALO LUA2-TX", VENDOR_MELCO, 0x0009,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("Microsoft MN-110", VENDOR_MICROSOFT, 0x007a,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("NETGEAR FA101", VENDOR_NETGEAR, 0x1020,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("OCT Inc.", VENDOR_OCT, 0x0109,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("OCT USB TO Ethernet", VENDOR_OCT, 0x0901,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("smartNIC 2 PnP Adapter", VENDOR_SMARTBRIDGES, 0x0003,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("SMC 202 USB Ethernet", VENDOR_SMC, 0x0200,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("SMC 2206 USB Ethernet", VENDOR_SMC, 0x0201,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("SOHOware NUB100 Ethernet", VENDOR_SOHOWARE, 0x9100,
+ DEFAULT_GPIO_RESET)
+PEGASUS_DEV("SOHOware NUB110 Ethernet", VENDOR_SOHOWARE, 0x9110,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
+PEGASUS_DEV("SpeedStream USB 10/100 Ethernet", VENDOR_SIEMENS, 0x1001,
+ DEFAULT_GPIO_RESET | PEGASUS_II)
#endif /* PEGASUS_DEV */
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 3935c44..de1ba14 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -275,17 +275,16 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
}
}
-static u32
-vmxnet3_get_flags(struct net_device *netdev) {
- return netdev->features;
-}
-
static int
-vmxnet3_set_flags(struct net_device *netdev, u32 data) {
+vmxnet3_set_flags(struct net_device *netdev, u32 data)
+{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+ if (data & ~ETH_FLAG_LRO)
+ return -EOPNOTSUPP;
+
if (lro_requested ^ lro_present) {
/* toggle the LRO feature*/
netdev->features ^= NETIF_F_LRO;
@@ -554,7 +553,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_strings = vmxnet3_get_strings,
- .get_flags = vmxnet3_get_flags,
+ .get_flags = ethtool_op_get_flags,
.set_flags = vmxnet3_set_flags,
.get_sset_count = vmxnet3_get_sset_count,
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 429b281..cd8caea 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -7,6 +7,7 @@
* Copyright (c) 2004 Balint Seeber <n0_5p4m_p13453@hotmail.com>
* Copyright (c) 2007 Guido Guenther <agx@sigxcpu.org>
* Copyright (c) 2007 Kalle Valo <kalle.valo@iki.fi>
+ * Copyright (c) 2010 Sebastian Smolorz <sesmo@gmx.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -1649,6 +1650,58 @@ exit:
return NULL;
}
+static int at76_join(struct at76_priv *priv)
+{
+ struct at76_req_join join;
+ int ret;
+
+ memset(&join, 0, sizeof(struct at76_req_join));
+ memcpy(join.essid, priv->essid, priv->essid_size);
+ join.essid_size = priv->essid_size;
+ memcpy(join.bssid, priv->bssid, ETH_ALEN);
+ join.bss_type = INFRASTRUCTURE_MODE;
+ join.channel = priv->channel;
+ join.timeout = cpu_to_le16(2000);
+
+ at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__);
+ ret = at76_set_card_command(priv->udev, CMD_JOIN, &join,
+ sizeof(struct at76_req_join));
+
+ if (ret < 0) {
+ printk(KERN_ERR "%s: at76_set_card_command failed: %d\n",
+ wiphy_name(priv->hw->wiphy), ret);
+ return 0;
+ }
+
+ ret = at76_wait_completion(priv, CMD_JOIN);
+ at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret);
+ if (ret != CMD_STATUS_COMPLETE) {
+ printk(KERN_ERR "%s: at76_wait_completion failed: %d\n",
+ wiphy_name(priv->hw->wiphy), ret);
+ return 0;
+ }
+
+ at76_set_pm_mode(priv);
+
+ return 0;
+}
+
+static void at76_work_join_bssid(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ work_join_bssid);
+
+ if (priv->device_unplugged)
+ return;
+
+ mutex_lock(&priv->mtx);
+
+ if (is_valid_ether_addr(priv->bssid))
+ at76_join(priv);
+
+ mutex_unlock(&priv->mtx);
+}
+
static void at76_mac80211_tx_callback(struct urb *urb)
{
struct at76_priv *priv = urb->context;
@@ -1686,6 +1739,7 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
struct at76_priv *priv = hw->priv;
struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
int padding, submit_len, ret;
at76_dbg(DBG_MAC80211, "%s()", __func__);
@@ -1696,6 +1750,21 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return NETDEV_TX_BUSY;
}
+ /* The following code lines are important when the device is going to
+ * authenticate with a new bssid. The driver must send CMD_JOIN before
+ * an authentication frame is transmitted. For this to succeed, the
+ * correct bssid of the AP must be known. As mac80211 does not inform
+ * drivers about the bssid prior to the authentication process, the
+ * following workaround is necessary: if the TX frame is an
+ * authentication frame, extract the bssid and send the CMD_JOIN. */
+ if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) {
+ if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
+ memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
+ ieee80211_queue_work(hw, &priv->work_join_bssid);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
ieee80211_stop_queues(hw);
at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */
@@ -1770,6 +1839,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
at76_dbg(DBG_MAC80211, "%s()", __func__);
cancel_delayed_work(&priv->dwork_hw_scan);
+ cancel_work_sync(&priv->work_join_bssid);
cancel_work_sync(&priv->work_set_promisc);
mutex_lock(&priv->mtx);
@@ -1818,42 +1888,6 @@ static void at76_remove_interface(struct ieee80211_hw *hw,
at76_dbg(DBG_MAC80211, "%s()", __func__);
}
-static int at76_join(struct at76_priv *priv)
-{
- struct at76_req_join join;
- int ret;
-
- memset(&join, 0, sizeof(struct at76_req_join));
- memcpy(join.essid, priv->essid, priv->essid_size);
- join.essid_size = priv->essid_size;
- memcpy(join.bssid, priv->bssid, ETH_ALEN);
- join.bss_type = INFRASTRUCTURE_MODE;
- join.channel = priv->channel;
- join.timeout = cpu_to_le16(2000);
-
- at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__);
- ret = at76_set_card_command(priv->udev, CMD_JOIN, &join,
- sizeof(struct at76_req_join));
-
- if (ret < 0) {
- printk(KERN_ERR "%s: at76_set_card_command failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
- return 0;
- }
-
- ret = at76_wait_completion(priv, CMD_JOIN);
- at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret);
- if (ret != CMD_STATUS_COMPLETE) {
- printk(KERN_ERR "%s: at76_wait_completion failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
- return 0;
- }
-
- at76_set_pm_mode(priv);
-
- return 0;
-}
-
static void at76_dwork_hw_scan(struct work_struct *work)
{
struct at76_priv *priv = container_of(work, struct at76_priv,
@@ -2107,6 +2141,7 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
mutex_init(&priv->mtx);
INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc);
INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx);
+ INIT_WORK(&priv->work_join_bssid, at76_work_join_bssid);
INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan);
tasklet_init(&priv->rx_tasklet, at76_rx_tasklet, 0);
@@ -2508,5 +2543,6 @@ MODULE_AUTHOR("Balint Seeber <n0_5p4m_p13453@hotmail.com>");
MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>");
MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>");
MODULE_AUTHOR("Kalle Valo <kalle.valo@iki.fi>");
+MODULE_AUTHOR("Sebastian Smolorz <sesmo@gmx.net>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 972ea0f..4a37447 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -387,6 +387,7 @@ struct at76_priv {
/* work queues */
struct work_struct work_set_promisc;
struct work_struct work_submit_rx;
+ struct work_struct work_join_bssid;
struct delayed_work dwork_hw_scan;
struct tasklet_struct rx_tasklet;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index cf16318..ea6362a 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -566,7 +566,7 @@ enum ath5k_pkt_type {
)
/*
- * DMA size definitions (2^n+2)
+ * DMA size definitions (2^(n+2))
*/
enum ath5k_dmasize {
AR5K_DMASIZE_4B = 0,
@@ -1127,15 +1127,10 @@ struct ath5k_hw {
/*
* Function pointers
*/
- int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
- u32 size, unsigned int flags);
int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
unsigned int, unsigned int, int, enum ath5k_pkt_type,
unsigned int, unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int, unsigned int);
- int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
- unsigned int, unsigned int, unsigned int, unsigned int,
- unsigned int, unsigned int);
int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
struct ath5k_tx_status *);
int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *,
@@ -1236,6 +1231,11 @@ int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
/* Hardware Descriptor Functions */
int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ u32 size, unsigned int flags);
+int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
+ u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3);
/* GPIO Functions */
void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index ef2dc1d..b32e28c 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -126,6 +126,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
ah->ah_noise_floor = -95; /* until first NF calibration is run */
sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
+ ah->ah_current_channel = &sc->channels[0];
/*
* Find the mac version
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9d37c1a..20328bd 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -311,7 +311,8 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
static int ath5k_txbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf,
struct ath5k_txq *txq, int padsize);
-static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
+
+static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
BUG_ON(!bf);
@@ -321,9 +322,11 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
}
-static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
+static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
struct ath5k_hw *ah = sc->ah;
@@ -336,6 +339,8 @@ static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
}
@@ -352,7 +357,6 @@ static void ath5k_txq_release(struct ath5k_softc *sc);
static int ath5k_rx_start(struct ath5k_softc *sc);
static void ath5k_rx_stop(struct ath5k_softc *sc);
static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
- struct ath5k_desc *ds,
struct sk_buff *skb,
struct ath5k_rx_status *rs);
static void ath5k_tasklet_rx(unsigned long data);
@@ -765,7 +769,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
* return false w/o doing anything. MAC's that do
* support it will return true w/o doing anything.
*/
- ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+ ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+
if (ret < 0)
goto err;
if (ret > 0)
@@ -1111,8 +1116,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
static int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
- sc->curchan->center_freq, chan->center_freq);
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "channel set, resetting (%u -> %u MHz)\n",
+ sc->curchan->center_freq, chan->center_freq);
/*
* To switch channels clear any pending DMA operations;
@@ -1228,21 +1234,23 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
* not get overrun under high load (as can happen with a
* 5212 when ANI processing enables PHY error frames).
*
- * To insure the last descriptor is self-linked we create
+ * To ensure the last descriptor is self-linked we create
* each descriptor as self-linked and add it to the end. As
* each additional descriptor is added the previous self-linked
- * entry is ``fixed'' naturally. This should be safe even
+ * entry is "fixed" naturally. This should be safe even
* if DMA is happening. When processing RX interrupts we
* never remove/process the last, self-linked, entry on the
- * descriptor list. This insures the hardware always has
+ * descriptor list. This ensures the hardware always has
* someplace to write a new frame.
*/
ds = bf->desc;
ds->ds_link = bf->daddr; /* link to self */
ds->ds_data = bf->skbaddr;
- ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
- if (ret)
+ ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+ if (ret) {
+ ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
return ret;
+ }
if (sc->rxlink != NULL)
*sc->rxlink = bf->daddr;
@@ -1347,7 +1355,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
mrr_tries[i] = info->control.rates[i + 1].count;
}
- ah->ah_setup_mrr_tx_desc(ah, ds,
+ ath5k_hw_setup_mrr_tx_desc(ah, ds,
mrr_rate[0], mrr_tries[0],
mrr_rate[1], mrr_tries[1],
mrr_rate[2], mrr_tries[2]);
@@ -1443,17 +1451,20 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
{
struct ath5k_buf *bf;
- ath5k_txbuf_free(sc, sc->bbuf);
+ ath5k_txbuf_free_skb(sc, sc->bbuf);
list_for_each_entry(bf, &sc->txbuf, list)
- ath5k_txbuf_free(sc, bf);
+ ath5k_txbuf_free_skb(sc, bf);
list_for_each_entry(bf, &sc->rxbuf, list)
- ath5k_rxbuf_free(sc, bf);
+ ath5k_rxbuf_free_skb(sc, bf);
/* Free memory associated with all descriptors */
pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+ sc->desc = NULL;
+ sc->desc_daddr = 0;
kfree(sc->bufptr);
sc->bufptr = NULL;
+ sc->bbuf = NULL;
}
@@ -1602,7 +1613,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
ath5k_debug_printtxbuf(sc, bf);
- ath5k_txbuf_free(sc, bf);
+ ath5k_txbuf_free_skb(sc, bf);
spin_lock_bh(&sc->txbuflock);
list_move_tail(&bf->list, &sc->txbuf);
@@ -1721,8 +1732,8 @@ ath5k_rx_stop(struct ath5k_softc *sc)
}
static unsigned int
-ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
- struct sk_buff *skb, struct ath5k_rx_status *rs)
+ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
@@ -1889,9 +1900,138 @@ static int ath5k_remove_padding(struct sk_buff *skb)
}
static void
-ath5k_tasklet_rx(unsigned long data)
+ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
+ struct ath5k_rx_status *rs)
{
struct ieee80211_rx_status *rxs;
+
+ /* The MAC header is padded to have 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - hdrlen % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ ath5k_remove_padding(skb);
+
+ rxs = IEEE80211_SKB_RXCB(skb);
+
+ rxs->flag = 0;
+ if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+ /*
+ * always extend the mac timestamp, since this information is
+ * also needed for proper IBSS merging.
+ *
+ * XXX: it might be too late to do it here, since rs_tstamp is
+ * 15bit only. that means TSF extension has to be done within
+ * 32768usec (about 32ms). it might be necessary to move this to
+ * the interrupt handler, like it is done in madwifi.
+ *
+ * Unfortunately we don't know when the hardware takes the rx
+ * timestamp (beginning of phy frame, data frame, end of rx?).
+ * The only thing we know is that it is hardware specific...
+ * On AR5213 it seems the rx timestamp is at the end of the
+ * frame, but i'm not sure.
+ *
+ * NOTE: mac80211 defines mactime at the beginning of the first
+ * data symbol. Since we don't have any time references it's
+ * impossible to comply to that. This affects IBSS merge only
+ * right now, so it's not too bad...
+ */
+ rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
+ rxs->flag |= RX_FLAG_TSFT;
+
+ rxs->freq = sc->curchan->center_freq;
+ rxs->band = sc->curband->band;
+
+ rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
+
+ rxs->antenna = rs->rs_antenna;
+
+ if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
+ sc->stats.antenna_rx[rs->rs_antenna]++;
+ else
+ sc->stats.antenna_rx[0]++; /* invalid */
+
+ rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
+ rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
+
+ if (rxs->rate_idx >= 0 && rs->rs_rate ==
+ sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+ rxs->flag |= RX_FLAG_SHORTPRE;
+
+ ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+
+ ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
+
+ /* check beacons in IBSS mode */
+ if (sc->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_check_ibss_tsf(sc, skb, rxs);
+
+ ieee80211_rx(sc->hw, skb);
+}
+
+/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
+ *
+ * Check if we want to further process this frame or not. Also update
+ * statistics. Return true if we want this frame, false if not.
+ */
+static bool
+ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
+{
+ sc->stats.rx_all_count++;
+
+ if (unlikely(rs->rs_status)) {
+ if (rs->rs_status & AR5K_RXERR_CRC)
+ sc->stats.rxerr_crc++;
+ if (rs->rs_status & AR5K_RXERR_FIFO)
+ sc->stats.rxerr_fifo++;
+ if (rs->rs_status & AR5K_RXERR_PHY) {
+ sc->stats.rxerr_phy++;
+ if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
+ sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
+ return false;
+ }
+ if (rs->rs_status & AR5K_RXERR_DECRYPT) {
+ /*
+ * Decrypt error. If the error occurred
+ * because there was no hardware key, then
+ * let the frame through so the upper layers
+ * can process it. This is necessary for 5210
+ * parts which have no way to setup a ``clear''
+ * key cache entry.
+ *
+ * XXX do key cache faulting
+ */
+ sc->stats.rxerr_decrypt++;
+ if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
+ !(rs->rs_status & AR5K_RXERR_CRC))
+ return true;
+ }
+ if (rs->rs_status & AR5K_RXERR_MIC) {
+ sc->stats.rxerr_mic++;
+ return true;
+ }
+
+ /* let crypto-error packets fall through in MNTR */
+ if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
+ sc->opmode != NL80211_IFTYPE_MONITOR)
+ return false;
+ }
+
+ if (unlikely(rs->rs_more)) {
+ sc->stats.rxerr_jumbo++;
+ return false;
+ }
+ return true;
+}
+
+static void
+ath5k_tasklet_rx(unsigned long data)
+{
struct ath5k_rx_status rs = {};
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
@@ -1901,7 +2041,6 @@ ath5k_tasklet_rx(unsigned long data)
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
- int rx_flag;
spin_lock(&sc->rxbuflock);
if (list_empty(&sc->rxbuf)) {
@@ -1909,8 +2048,6 @@ ath5k_tasklet_rx(unsigned long data)
goto unlock;
}
do {
- rx_flag = 0;
-
bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
BUG_ON(bf->skb == NULL);
skb = bf->skb;
@@ -1926,137 +2063,30 @@ ath5k_tasklet_rx(unsigned long data)
else if (unlikely(ret)) {
ATH5K_ERR(sc, "error in processing rx descriptor\n");
sc->stats.rxerr_proc++;
- spin_unlock(&sc->rxbuflock);
- return;
+ break;
}
- sc->stats.rx_all_count++;
-
- if (unlikely(rs.rs_status)) {
- if (rs.rs_status & AR5K_RXERR_CRC)
- sc->stats.rxerr_crc++;
- if (rs.rs_status & AR5K_RXERR_FIFO)
- sc->stats.rxerr_fifo++;
- if (rs.rs_status & AR5K_RXERR_PHY) {
- sc->stats.rxerr_phy++;
- if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
- sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
- goto next;
- }
- if (rs.rs_status & AR5K_RXERR_DECRYPT) {
- /*
- * Decrypt error. If the error occurred
- * because there was no hardware key, then
- * let the frame through so the upper layers
- * can process it. This is necessary for 5210
- * parts which have no way to setup a ``clear''
- * key cache entry.
- *
- * XXX do key cache faulting
- */
- sc->stats.rxerr_decrypt++;
- if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
- !(rs.rs_status & AR5K_RXERR_CRC))
- goto accept;
- }
- if (rs.rs_status & AR5K_RXERR_MIC) {
- rx_flag |= RX_FLAG_MMIC_ERROR;
- sc->stats.rxerr_mic++;
- goto accept;
- }
+ if (ath5k_receive_frame_ok(sc, &rs)) {
+ next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
- /* let crypto-error packets fall through in MNTR */
- if ((rs.rs_status &
- ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
- sc->opmode != NL80211_IFTYPE_MONITOR)
+ /*
+ * If we can't replace bf->skb with a new skb under
+ * memory pressure, just skip this packet
+ */
+ if (!next_skb)
goto next;
- }
-
- if (unlikely(rs.rs_more)) {
- sc->stats.rxerr_jumbo++;
- goto next;
- }
-accept:
- next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
+ pci_unmap_single(sc->pdev, bf->skbaddr,
+ common->rx_bufsize,
+ PCI_DMA_FROMDEVICE);
- /*
- * If we can't replace bf->skb with a new skb under memory
- * pressure, just skip this packet
- */
- if (!next_skb)
- goto next;
-
- pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
- PCI_DMA_FROMDEVICE);
- skb_put(skb, rs.rs_datalen);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - hdrlen % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- ath5k_remove_padding(skb);
+ skb_put(skb, rs.rs_datalen);
- rxs = IEEE80211_SKB_RXCB(skb);
+ ath5k_receive_frame(sc, skb, &rs);
- /*
- * always extend the mac timestamp, since this information is
- * also needed for proper IBSS merging.
- *
- * XXX: it might be too late to do it here, since rs_tstamp is
- * 15bit only. that means TSF extension has to be done within
- * 32768usec (about 32ms). it might be necessary to move this to
- * the interrupt handler, like it is done in madwifi.
- *
- * Unfortunately we don't know when the hardware takes the rx
- * timestamp (beginning of phy frame, data frame, end of rx?).
- * The only thing we know is that it is hardware specific...
- * On AR5213 it seems the rx timestamp is at the end of the
- * frame, but i'm not sure.
- *
- * NOTE: mac80211 defines mactime at the beginning of the first
- * data symbol. Since we don't have any time references it's
- * impossible to comply to that. This affects IBSS merge only
- * right now, so it's not too bad...
- */
- rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp);
- rxs->flag = rx_flag | RX_FLAG_TSFT;
-
- rxs->freq = sc->curchan->center_freq;
- rxs->band = sc->curband->band;
-
- rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
-
- rxs->antenna = rs.rs_antenna;
-
- if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
- sc->stats.antenna_rx[rs.rs_antenna]++;
- else
- sc->stats.antenna_rx[0]++; /* invalid */
-
- rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
- rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
-
- if (rxs->rate_idx >= 0 && rs.rs_rate ==
- sc->curband->bitrates[rxs->rate_idx].hw_value_short)
- rxs->flag |= RX_FLAG_SHORTPRE;
-
- ath5k_debug_dump_skb(sc, skb, "RX ", 0);
-
- ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
-
- /* check beacons in IBSS mode */
- if (sc->opmode == NL80211_IFTYPE_ADHOC)
- ath5k_check_ibss_tsf(sc, skb, rxs);
-
- ieee80211_rx(sc->hw, skb);
-
- bf->skb = next_skb;
- bf->skbaddr = next_skb_addr;
+ bf->skb = next_skb;
+ bf->skbaddr = next_skb_addr;
+ }
next:
list_move_tail(&bf->list, &sc->rxbuf);
} while (ath5k_rxbuf_setup(sc, bf) == 0);
@@ -2065,8 +2095,6 @@ unlock:
}
-
-
/*************\
* TX Handling *
\*************/
@@ -2298,6 +2326,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
"stuck beacon time (%u missed)\n",
sc->bmisscount);
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "stuck beacon, resetting\n");
tasklet_schedule(&sc->restq);
}
return;
@@ -2647,7 +2677,7 @@ ath5k_stop_hw(struct ath5k_softc *sc)
ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
"putting device to sleep\n");
}
- ath5k_txbuf_free(sc, sc->bbuf);
+ ath5k_txbuf_free_skb(sc, sc->bbuf);
mmiowb();
mutex_unlock(&sc->lock);
@@ -2705,6 +2735,8 @@ ath5k_intr(int irq, void *dev_id)
* Fatal errors are unrecoverable.
* Typically these are caused by DMA errors.
*/
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "fatal int, resetting\n");
tasklet_schedule(&sc->restq);
} else if (unlikely(status & AR5K_INT_RXORN)) {
/*
@@ -2717,8 +2749,11 @@ ath5k_intr(int irq, void *dev_id)
* this guess is copied from the HAL.
*/
sc->stats.rxorn_intr++;
- if (ah->ah_mac_srev < AR5K_SREV_AR5212)
+ if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "rx overrun, resetting\n");
tasklet_schedule(&sc->restq);
+ }
else
tasklet_schedule(&sc->rxtq);
} else {
@@ -3368,7 +3403,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ath5k_debug_dump_skb(sc, skb, "BC ", 1);
- ath5k_txbuf_free(sc, sc->bbuf);
+ ath5k_txbuf_free_skb(sc, sc->bbuf);
sc->bbuf->skb = skb;
ret = ath5k_beacon_setup(sc, sc->bbuf);
if (ret)
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 0f2e37d..8c63886 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -278,6 +278,7 @@ static ssize_t write_file_reset(struct file *file,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
tasklet_schedule(&sc->restq);
return count;
}
@@ -924,7 +925,7 @@ ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
ds, (unsigned long long)bf->daddr,
ds->ds_link, ds->ds_data,
rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
- rd->u.rx_stat.rx_status_0, rd->u.rx_stat.rx_status_0,
+ rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1,
!done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
}
@@ -939,7 +940,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
return;
- printk(KERN_DEBUG "rx queue %x, link %p\n",
+ printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
ath5k_hw_get_rxdp(ah), sc->rxlink);
spin_lock_bh(&sc->rxbuflock);
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index da5dbb63..4324438 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -91,14 +91,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
/*
- * Verify and set header length
- * XXX: I only found that on 5210 code, does it work on 5211 ?
+ * Verify and set header length (only 5210)
*/
if (ah->ah_version == AR5K_AR5210) {
- if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
+ if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
return -EINVAL;
tx_ctl->tx_control_0 |=
- AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
+ AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
}
/*Differences between 5210-5211*/
@@ -110,11 +109,11 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
case AR5K_PKT_TYPE_PIFS:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
default:
- frame_type = type /*<< 2 ?*/;
+ frame_type = type;
}
tx_ctl->tx_control_0 |=
- AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
+ AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
} else {
@@ -123,21 +122,30 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
AR5K_REG_SM(antenna_mode,
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |=
- AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
+ AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
}
+
#define _TX_FLAGS(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
tx_ctl->tx_control_##_c |= \
AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
}
-
+#define _TX_FLAGS_5211(_c, _flag) \
+ if (flags & AR5K_TXDESC_##_flag) { \
+ tx_ctl->tx_control_##_c |= \
+ AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \
+ }
_TX_FLAGS(0, CLRDMASK);
- _TX_FLAGS(0, VEOL);
_TX_FLAGS(0, INTREQ);
_TX_FLAGS(0, RTSENA);
- _TX_FLAGS(1, NOACK);
+
+ if (ah->ah_version == AR5K_AR5211) {
+ _TX_FLAGS_5211(0, VEOL);
+ _TX_FLAGS_5211(1, NOACK);
+ }
#undef _TX_FLAGS
+#undef _TX_FLAGS_5211
/*
* WEP crap
@@ -147,7 +155,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
tx_ctl->tx_control_1 |=
AR5K_REG_SM(key_index,
- AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
}
/*
@@ -156,7 +164,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
if ((ah->ah_version == AR5K_AR5210) &&
(flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
tx_ctl->tx_control_1 |= rtscts_duration &
- AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
+ AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;
return 0;
}
@@ -255,7 +263,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
if (key_index != AR5K_TXKEYIX_INVALID) {
tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
- AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
+ AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
}
/*
@@ -277,13 +285,17 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
/*
* Initialize a 4-word multi rate retry tx control descriptor on 5212
*/
-static int
+int
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
+ /* no mrr support for cards older than 5212 */
+ if (ah->ah_version < AR5K_AR5212)
+ return 0;
+
/*
* Rates can be 0 as long as the retry count is 0 too.
* A zero rate and nonzero retry count will put the HW into a mode where
@@ -323,15 +335,6 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/* no mrr support for cards older than 5212 */
-static int
-ath5k_hw_setup_no_mrr(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
- u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
-{
- return 0;
-}
-
/*
 * Process the tx status descriptor on 5210/5211
*/
@@ -414,11 +417,11 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
ts->ts_antenna = (tx_status->tx_status_1 &
- AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
+ AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
ts->ts_status = 0;
ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1,
- AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX);
+ AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
/* The longretry counter has the number of un-acked retries
* for the final rate. To get the total number of retries
@@ -480,8 +483,8 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
/*
* Initialize an rx control descriptor
*/
-static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- u32 size, unsigned int flags)
+int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
@@ -496,10 +499,11 @@ static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
*/
memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
+ if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
+ return -EINVAL;
+
/* Setup descriptor */
rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
- if (unlikely(rx_ctl->rx_control_1 != size))
- return -EINVAL;
if (flags & AR5K_RXDESC_INTREQ)
rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
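The reordered length check above rejects any size that does not fit the 12-bit buffer length field before the descriptor is touched. A minimal standalone sketch of the same test (the helper name is hypothetical; the mask is AR5K_DESC_RX_CTL1_BUF_LEN from desc.h):

	/* Illustrative only: return 0 when size fits the 12-bit BUF_LEN field. */
	static int ath5k_rx_buf_len_ok(u32 size)
	{
		if (size & ~AR5K_DESC_RX_CTL1_BUF_LEN)	/* any bit above 0x00000fff set */
			return -EINVAL;
		return 0;
	}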
@@ -515,13 +519,15 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
{
struct ath5k_hw_rx_status *rx_status;
- rx_status = &desc->ud.ds_rx.u.rx_stat;
+ rx_status = &desc->ud.ds_rx.rx_stat;
/* No frame received / not ready */
if (unlikely(!(rx_status->rx_status_1 &
- AR5K_5210_RX_DESC_STATUS1_DONE)))
+ AR5K_5210_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
+ memset(rs, 0, sizeof(struct ath5k_rx_status));
+
/*
* Frame receive status
*/
@@ -531,15 +537,23 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
- rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
- AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA);
rs->rs_more = !!(rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_MORE);
- /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
+ /* TODO: this timestamp is 13 bit, later on we assume 15 bit!
+ * also the HAL code for 5210 says the timestamp is bits [10..22] of the
+ * TSF, and extends the timestamp here to 15 bit.
+ * we need to check on 5210...
+ */
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
- rs->rs_status = 0;
- rs->rs_phyerr = 0;
+
+ if (ah->ah_version == AR5K_AR5211)
+ rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
+ else
+ rs->rs_antenna = (rx_status->rx_status_0 &
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
+ ? 2 : 1;
/*
* Key table status
@@ -554,19 +568,21 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
* Receive/descriptor errors
*/
if (!(rx_status->rx_status_1 &
- AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+ AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
- if (rx_status->rx_status_1 &
- AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
+ /* only on 5210 */
+ if ((ah->ah_version == AR5K_AR5210) &&
+ (rx_status->rx_status_1 &
+ AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
rs->rs_status |= AR5K_RXERR_FIFO;
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
- rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
+ rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
}
@@ -582,21 +598,20 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
 * Process the rx status descriptor on 5212
*/
static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
- struct ath5k_hw_rx_error *rx_err;
-
- rx_status = &desc->ud.ds_rx.u.rx_stat;
- /* Overlay on error */
- rx_err = &desc->ud.ds_rx.u.rx_err;
+ rx_status = &desc->ud.ds_rx.rx_stat;
/* No frame received / not ready */
if (unlikely(!(rx_status->rx_status_1 &
- AR5K_5212_RX_DESC_STATUS1_DONE)))
+ AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
+ memset(rs, 0, sizeof(struct ath5k_rx_status));
+
/*
* Frame receive status
*/
@@ -612,15 +627,13 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
AR5K_5212_RX_DESC_STATUS0_MORE);
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
- rs->rs_status = 0;
- rs->rs_phyerr = 0;
/*
* Key table status
*/
if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
- AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
+ AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
else
rs->rs_keyix = AR5K_RXKEYIX_INVALID;
@@ -628,7 +641,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
* Receive/descriptor errors
*/
if (!(rx_status->rx_status_1 &
- AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+ AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rx_status->rx_status_1 &
AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
@@ -636,9 +649,10 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
if (rx_status->rx_status_1 &
AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
- rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
- AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
- ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
+ rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
+ AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rx_status->rx_status_1 &
@@ -649,7 +663,6 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
rs->rs_status |= AR5K_RXERR_MIC;
}
-
return 0;
}
@@ -658,29 +671,15 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
*/
int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
-
- if (ah->ah_version != AR5K_AR5210 &&
- ah->ah_version != AR5K_AR5211 &&
- ah->ah_version != AR5K_AR5212)
- return -ENOTSUPP;
-
if (ah->ah_version == AR5K_AR5212) {
- ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
- ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
- } else {
- ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
+ ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
+ } else if (ah->ah_version <= AR5K_AR5211) {
ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
- ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_no_mrr;
ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
- }
-
- if (ah->ah_version == AR5K_AR5212)
- ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
- else if (ah->ah_version <= AR5K_AR5211)
ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
-
+ } else
+ return -ENOTSUPP;
return 0;
}
-
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 64538fb..b2adb2a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -17,28 +17,24 @@
*/
/*
- * Internal RX/TX descriptor structures
- * (rX: reserved fields possibily used by future versions of the ar5k chipset)
+ * RX/TX descriptor structures
*/
/*
- * common hardware RX control descriptor
+ * Common hardware RX control descriptor
*/
struct ath5k_hw_rx_ctl {
u32 rx_control_0; /* RX control word 0 */
u32 rx_control_1; /* RX control word 1 */
} __packed;
-/* RX control word 0 field/sflags */
-#define AR5K_DESC_RX_CTL0 0x00000000
-
/* RX control word 1 fields/flags */
-#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff
-#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000
+#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
/*
- * common hardware RX status descriptor
- * 5210/11 and 5212 differ only in the flags defined below
+ * Common hardware RX status descriptor
+ * 5210, 5211 and 5212 differ only in the fields and flags defined below
*/
struct ath5k_hw_rx_status {
u32 rx_status_0; /* RX status word 0 */
@@ -47,81 +43,69 @@ struct ath5k_hw_rx_status {
/* 5210/5211 */
/* RX status word 0 fields/flags */
-#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff
-#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000
+#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
+#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210 0x00004000 /* [5210] receive on ant 1 */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 /* reception rate */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 /* rssi */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211 0x38000000 /* [5211] receive antenna */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211_S 27
/* RX status word 1 fields/flags */
-#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001
-#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
-#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004
-#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN 0x00000008
-#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010
-#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0
+#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* reception success */
+#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
+#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210 0x00000008 /* [5210] FIFO overrun */
+#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 /* decryption CRC failure */
+#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 /* PHY error */
#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 /* decryption key index */
#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9
-#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000
+#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 /* 13 bit of TSF */
#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15
-#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000
+#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 /* key cache miss */
/* 5212 */
/* RX status word 0 fields/flags */
-#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff
-#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000
-#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000
+#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
+#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 /* decompression CRC error */
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 /* reception rate */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 /* rssi */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 /* receive antenna */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
/* RX status word 1 fields/flags */
-#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001
-#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
-#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004
-#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008
-#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010
-#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00
+#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* frame reception success */
+#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
+#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 /* decryption CRC failure */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 /* PHY error */
+#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 /* MIC decrypt error */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 /* decryption key index */
#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9
-#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000
+#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 /* first 15bit of the TSF */
#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16
-#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000
-
-/*
- * common hardware RX error descriptor
- */
-struct ath5k_hw_rx_error {
- u32 rx_error_0; /* RX status word 0 */
- u32 rx_error_1; /* RX status word 1 */
-} __packed;
-
-/* RX error word 0 fields/flags */
-#define AR5K_RX_DESC_ERROR0 0x00000000
-
-/* RX error word 1 fields/flags */
-#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
-#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
+#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 /* key cache miss */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE 0x0000ff00 /* phy error code overlays key index and valid fields */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE_S 8
/**
* enum ath5k_phy_error_code - PHY Error codes
*/
enum ath5k_phy_error_code {
- AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */
AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
- AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
+ AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */
AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
/* these are specific to the 5212 */
@@ -148,112 +132,111 @@ struct ath5k_hw_2w_tx_ctl {
} __packed;
/* TX control word 0 fields/flags */
-#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff
-#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/
-#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12
-#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000
+#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210 0x0003f000 /* [5210] header length */
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210_S 12
+#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 /* tx rate */
#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S 18
-#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000
-#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000
-#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET 0x00800000 /*[5210]*/
-#define AR5K_2W_TX_DESC_CTL0_VEOL 0x00800000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE 0x1c000000 /*[5210]*/
-#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000
-
+#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
+#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET_5210 0x00800000 /* [5210] long packet */
+#define AR5K_2W_TX_DESC_CTL0_VEOL_5211 0x00800000 /* [5211] virtual end-of-list */
+#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 /* [5210] antenna selection */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 /* [5211] antenna selection */
#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \
(ah->ah_version == AR5K_AR5210 ? \
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
-
#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
-#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000
-#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210 0x1c000000 /* [5210] frame type */
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210_S 26
+#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
+#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* key is valid */
/* TX control word 1 fields/flags */
-#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff
-#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000
-
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX \
+#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 0x0007e000 /* [5210] key table index */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211 0x000fe000 /* [5211] key table index */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX \
(ah->ah_version == AR5K_AR5210 ? \
- AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \
- AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211)
-
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13
-#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20
-#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 : \
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211)
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_S 13
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211 0x00700000 /* [5211] frame type */
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211_S 20
+#define AR5K_2W_TX_DESC_CTL1_NOACK_5211 0x00800000 /* [5211] no ACK */
+#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210 0xfff80000 /* [5210] lower 13 bit of duration */
/* Frame types */
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 0x0c
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 0x10
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 1
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 2
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 3
+#define AR5K_AR5211_TX_DESC_FRAME_TYPE_BEACON 3
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
+#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
/*
* 5212 hardware 4-word TX control descriptor
*/
struct ath5k_hw_4w_tx_ctl {
u32 tx_control_0; /* TX control word 0 */
+ u32 tx_control_1; /* TX control word 1 */
+ u32 tx_control_2; /* TX control word 2 */
+ u32 tx_control_3; /* TX control word 3 */
+} __packed;
-#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff
-#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000
+/* TX control word 0 fields/flags */
+#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
+#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 /* transmit power */
#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S 16
-#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000
-#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000
-#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000
-#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000
+#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
+#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 /* virtual end-of-list */
+#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
+#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 /* TX antenna selection */
#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
-#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000
-#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000
-#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000
-
- u32 tx_control_1; /* TX control word 1 */
+#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
+#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* destination index valid */
+#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 /* precede frame with CTS */
-#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff
-#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000
-#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX 0x000fe000
-#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13
-#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000
+/* TX control word 1 fields/flags */
+#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX 0x000fe000 /* destination table index */
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX_S 13
+#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 /* frame type */
#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S 20
-#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000
-#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000
+#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 /* no ACK */
+#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 /* compression processing */
#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S 25
-#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 /* length of frame IV */
#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S 27
-#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 /* length of frame ICV */
#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S 29
- u32 tx_control_2; /* TX control word 2 */
-
-#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff
-#define AR5K_4W_TX_DESC_CTL2_DURATION_UPDATE_ENABLE 0x00008000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28
-
- u32 tx_control_3; /* TX control word 3 */
-
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0
+/* TX control word 2 fields/flags */
+#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff /* RTS/CTS duration */
+#define AR5K_4W_TX_DESC_CTL2_DURATION_UPD_EN 0x00008000 /* frame duration update */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 /* series 0 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 /* series 1 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 /* series 2 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 /* series 3 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28
+
+/* TX control word 3 fields/flags */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f /* series 0 tx rate */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0 /* series 1 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S 5
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 /* series 2 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S 10
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 /* series 3 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S 15
-#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000
+#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
-} __packed;
/*
* Common TX status descriptor
@@ -264,37 +247,34 @@ struct ath5k_hw_tx_status {
} __packed;
/* TX status word 0 fields/flags */
-#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001
-#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002
-#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004
-#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008
-/*???
-#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT 0x000000f0
-#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT_S 4
-*/
-#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0
+#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */
+#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 /* excessive retries */
+#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 /* FIFO underrun */
+#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 /* TX filter indication */
+/* according to the HAL sources the spec has short/long retry counts reversed.
+ * we have it reversed to the HAL sources as well, for 5210 and 5211.
+ * For 5212 these fields are defined as RTS_FAIL_COUNT and DATA_FAIL_COUNT,
+ * but used respectively as SHORT and LONG retry count in the code later. This
+ * is consistent with the definitions here... TODO: check */
+#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 /* short retry count */
#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S 4
-/*???
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT 0x00000f00
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT_S 8
-*/
-#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00
+#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00 /* long retry count */
#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S 8
-#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT 0x0000f000
-#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT_S 12
-#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000
+#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211 0x0000f000 /* [5211+] virtual collision count */
+#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211_S 12
+#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 /* TX timestamp */
#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16
/* TX status word 1 fields/flags */
-#define AR5K_DESC_TX_STATUS1_DONE 0x00000001
-#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe
+#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe /* TX sequence number */
#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1
-#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000
+#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 /* signal strength of ACK */
#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S 13
-#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX 0x00600000
-#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21
-#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000
-#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212 0x00600000 /* [5212] final TX attempt series ix */
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212_S 21
+#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
+#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
/*
* 5210/5211 hardware TX descriptor
@@ -313,18 +293,15 @@ struct ath5k_hw_5212_tx_desc {
} __packed;
/*
- * common hardware RX descriptor
+ * Common hardware RX descriptor
*/
struct ath5k_hw_all_rx_desc {
- struct ath5k_hw_rx_ctl rx_ctl;
- union {
- struct ath5k_hw_rx_status rx_stat;
- struct ath5k_hw_rx_error rx_err;
- } u;
+ struct ath5k_hw_rx_ctl rx_ctl;
+ struct ath5k_hw_rx_status rx_stat;
} __packed;
/*
- * Atheros hardware descriptor
+ * Atheros hardware DMA descriptor
* This is read and written to by the hardware
*/
struct ath5k_desc {
@@ -346,4 +323,3 @@ struct ath5k_desc {
#define AR5K_TXDESC_CTSENA 0x0008
#define AR5K_TXDESC_INTREQ 0x0010
#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/
-
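The field masks and *_S shift values defined above are consumed through the driver's AR5K_REG_SM()/AR5K_REG_MS() pack/extract helpers seen throughout desc.c. Their definitions look roughly like the following (paraphrased; see ath5k.h for the authoritative versions):

	/* Shift a value into a register field, or extract it again.   */
	/* Assumes every field FOO has a matching FOO_S shift define.   */
	#define AR5K_REG_SM(_val, _flags) \
		(((_val) << _flags##_S) & (_flags))
	#define AR5K_REG_MS(_reg, _flags) \
		(((_reg) & (_flags)) >> _flags##_S)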
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 73c4fcd..6284c38 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1768,7 +1768,7 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
if (enable) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
- AR5K_PHY_RESTART_DIV_GC, 1);
+ AR5K_PHY_RESTART_DIV_GC, 4);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
AR5K_PHY_FAST_ANT_DIV_EN);
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index dd112be..973ae4f 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -32,7 +32,8 @@ ath9k_hw-y:= \
mac.o \
ar9002_mac.o \
ar9003_mac.o \
- ar9003_eeprom.o
+ ar9003_eeprom.o \
+ ar9003_paprd.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 3da820f..cc648b6 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,8 +17,99 @@
#include "hw.h"
#include "hw-ops.h"
-static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
- struct ath9k_channel *chan)
+struct ani_ofdm_level_entry {
+ int spur_immunity_level;
+ int fir_step_level;
+ int ofdm_weak_signal_on;
+};
+
+/* values here are relative to the INI */
+
+/*
+ * Legend:
+ *
+ * SI: Spur immunity
+ * FS: FIR Step
+ * WS: OFDM / CCK Weak Signal detection
+ * MRC-CCK: Maximal Ratio Combining for CCK
+ */
+
+static const struct ani_ofdm_level_entry ofdm_level_table[] = {
+ /* SI FS WS */
+ { 0, 0, 1 }, /* lvl 0 */
+ { 1, 1, 1 }, /* lvl 1 */
+ { 2, 2, 1 }, /* lvl 2 */
+ { 3, 2, 1 }, /* lvl 3 (default) */
+ { 4, 3, 1 }, /* lvl 4 */
+ { 5, 4, 1 }, /* lvl 5 */
+ { 6, 5, 1 }, /* lvl 6 */
+ { 7, 6, 1 }, /* lvl 7 */
+ { 7, 7, 1 }, /* lvl 8 */
+ { 7, 8, 0 } /* lvl 9 */
+};
+#define ATH9K_ANI_OFDM_NUM_LEVEL \
+ (sizeof(ofdm_level_table)/sizeof(ofdm_level_table[0]))
+#define ATH9K_ANI_OFDM_MAX_LEVEL \
+ (ATH9K_ANI_OFDM_NUM_LEVEL-1)
+#define ATH9K_ANI_OFDM_DEF_LEVEL \
+ 3 /* default level - matches the INI settings */
+
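With the ten rows in ofdm_level_table, the three helpers directly above work out to ATH9K_ANI_OFDM_NUM_LEVEL = 10, ATH9K_ANI_OFDM_MAX_LEVEL = 9 and ATH9K_ANI_OFDM_DEF_LEVEL = 3, i.e. the default level selects the { 3, 2, 1 } row (SI 3, FS 2, OFDM weak-signal detection on).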
+/*
+ * MRC (Maximal Ratio Combining) has always been used with multi-antenna ofdm.
+ * With OFDM for single stream you just add up all antenna inputs, you're
+ * only interested in what you get after FFT. Signal aligment is also not
+ * required for OFDM because any phase difference adds up in the frequency
+ * domain.
+ *
+ * MRC requires extra work for use with CCK. You need to align the antenna
+ * signals from the different antennas before you can add the signals together.
+ * You need alignment of signals as CCK is in time domain, so addition can cancel
+ * your signal completely if phase is 180 degrees (think of adding sine waves).
+ * You also need to remove noise before the addition and this is where ANI
+ * MRC CCK comes into play. One of the antenna inputs may be stronger but
+ * lower SNR, so just adding after alignment can be dangerous.
+ *
+ * Regardless of alignment in time, the antenna signals add constructively after
+ * FFT and improve your reception. For more information:
+ *
+ * http://en.wikipedia.org/wiki/Maximal-ratio_combining
+ */
+
+struct ani_cck_level_entry {
+ int fir_step_level;
+ int mrc_cck_on;
+};
+
+static const struct ani_cck_level_entry cck_level_table[] = {
+ /* FS MRC-CCK */
+ { 0, 1 }, /* lvl 0 */
+ { 1, 1 }, /* lvl 1 */
+ { 2, 1 }, /* lvl 2 (default) */
+ { 3, 1 }, /* lvl 3 */
+ { 4, 0 }, /* lvl 4 */
+ { 5, 0 }, /* lvl 5 */
+ { 6, 0 }, /* lvl 6 */
+ { 7, 0 }, /* lvl 7 (only for high rssi) */
+ { 8, 0 } /* lvl 8 (only for high rssi) */
+};
+
+#define ATH9K_ANI_CCK_NUM_LEVEL \
+ (sizeof(cck_level_table)/sizeof(cck_level_table[0]))
+#define ATH9K_ANI_CCK_MAX_LEVEL \
+ (ATH9K_ANI_CCK_NUM_LEVEL-1)
+#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \
+ (ATH9K_ANI_CCK_NUM_LEVEL-3)
+#define ATH9K_ANI_CCK_DEF_LEVEL \
+ 2 /* default level - matches the INI settings */
+
+/* Private to ani.c */
+static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->ani_lower_immunity(ah);
+}
+
+int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
+ struct ath9k_channel *chan)
{
int i;
@@ -48,7 +139,7 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
stats->beacons += REG_READ(ah, AR_BEACON_CNT);
}
-static void ath9k_ani_restart(struct ath_hw *ah)
+static void ath9k_ani_restart_old(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
struct ath_common *common = ath9k_hw_common(ah);
@@ -96,7 +187,42 @@ static void ath9k_ani_restart(struct ath_hw *ah)
aniState->cckPhyErrCount = 0;
}
-static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
+static void ath9k_ani_restart_new(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ah->curani;
+ aniState->listenTime = 0;
+
+ aniState->ofdmPhyErrBase = 0;
+ aniState->cckPhyErrBase = 0;
+
+ ath_print(common, ATH_DBG_ANI,
+ "Writing ofdmbase=%08x cckbase=%08x\n",
+ aniState->ofdmPhyErrBase,
+ aniState->cckPhyErrBase);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ aniState->ofdmPhyErrCount = 0;
+ aniState->cckPhyErrCount = 0;
+}
+
+static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ar5416AniState *aniState;
@@ -168,7 +294,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
}
}
-static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
+static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ar5416AniState *aniState;
@@ -206,7 +332,125 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
}
}
-static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
+/* Adjust the OFDM Noise Immunity Level */
+static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+ const struct ani_ofdm_level_entry *entry_ofdm;
+ const struct ani_cck_level_entry *entry_cck;
+
+ aniState->noiseFloor = BEACON_RSSI(ah);
+
+ ath_print(common, ATH_DBG_ANI,
+ "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ aniState->ofdmNoiseImmunityLevel,
+ immunityLevel, aniState->noiseFloor,
+ aniState->rssiThrLow, aniState->rssiThrHigh);
+
+ aniState->ofdmNoiseImmunityLevel = immunityLevel;
+
+ entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
+ entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
+
+ if (aniState->spurImmunityLevel != entry_ofdm->spur_immunity_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
+ entry_ofdm->spur_immunity_level);
+
+ if (aniState->firstepLevel != entry_ofdm->fir_step_level &&
+ entry_ofdm->fir_step_level >= entry_cck->fir_step_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ entry_ofdm->fir_step_level);
+
+ if ((ah->opmode != NL80211_IFTYPE_STATION &&
+ ah->opmode != NL80211_IFTYPE_ADHOC) ||
+ aniState->noiseFloor <= aniState->rssiThrHigh) {
+ if (aniState->ofdmWeakSigDetectOff)
+ /* force on ofdm weak sig detect */
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ true);
+ else if (aniState->ofdmWeakSigDetectOff ==
+ entry_ofdm->ofdm_weak_signal_on)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ entry_ofdm->ofdm_weak_signal_on);
+ }
+}
+
+static void ath9k_hw_ani_ofdm_err_trigger_new(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ah->curani;
+
+ if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1);
+}
+
+/*
+ * Set the ANI settings to match an CCK level.
+ */
+static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+ const struct ani_ofdm_level_entry *entry_ofdm;
+ const struct ani_cck_level_entry *entry_cck;
+
+ aniState->noiseFloor = BEACON_RSSI(ah);
+ ath_print(common, ATH_DBG_ANI,
+ "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ aniState->cckNoiseImmunityLevel, immunityLevel,
+ aniState->noiseFloor, aniState->rssiThrLow,
+ aniState->rssiThrHigh);
+
+ if ((ah->opmode == NL80211_IFTYPE_STATION ||
+ ah->opmode == NL80211_IFTYPE_ADHOC) &&
+ aniState->noiseFloor <= aniState->rssiThrLow &&
+ immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
+ immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
+
+ aniState->cckNoiseImmunityLevel = immunityLevel;
+
+ entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
+ entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
+
+ if (aniState->firstepLevel != entry_cck->fir_step_level &&
+ entry_cck->fir_step_level >= entry_ofdm->fir_step_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ entry_cck->fir_step_level);
+
+ /* Skip MRC CCK for pre AR9003 families */
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ return;
+
+ if (aniState->mrcCCKOff == entry_cck->mrc_cck_on)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_MRC_CCK,
+ entry_cck->mrc_cck_on);
+}
+
+static void ath9k_hw_ani_cck_err_trigger_new(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ah->curani;
+
+ if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1);
+}
+
+static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
int32_t rssi;
@@ -259,9 +503,53 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
}
}
+/*
+ * only lower either OFDM or CCK errors per turn
+ * we lower the other one next time
+ */
+static void ath9k_hw_ani_lower_immunity_new(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ aniState = ah->curani;
+
+ /* lower OFDM noise immunity */
+ if (aniState->ofdmNoiseImmunityLevel > 0 &&
+ (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1);
+ return;
+ }
+
+ /* lower CCK noise immunity */
+ if (aniState->cckNoiseImmunityLevel > 0)
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1);
+}
+
+static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah)
+{
+ struct ath9k_channel *chan = ah->curchan;
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+ u8 clockrate; /* in MHz */
+
+ if (!ah->curchan) /* should really check for CCK instead */
+ clockrate = ATH9K_CLOCK_RATE_CCK;
+ else if (conf->channel->band == IEEE80211_BAND_2GHZ)
+ clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
+ else if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
+ else
+ clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
+
+ if (conf_is_ht40(conf))
+ return clockrate * 2;
+
+ return clockrate;
+}
+
static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
u32 txFrameCount, rxFrameCount, cycleCount;
int32_t listenTime;
@@ -271,15 +559,31 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
aniState = ah->curani;
if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
-
listenTime = 0;
ah->stats.ast_ani_lzero++;
+ ath_print(common, ATH_DBG_ANI,
+ "1st call: aniState->cycleCount=%d\n",
+ aniState->cycleCount);
} else {
int32_t ccdelta = cycleCount - aniState->cycleCount;
int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
int32_t tfdelta = txFrameCount - aniState->txFrameCount;
- listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
+ int32_t clock_rate;
+
+ /*
+ * convert HW counter values to ms using mode
+ * specific clock rate
+ */
+ clock_rate = ath9k_hw_chan_2_clockrate_mhz(ah) * 1000;
+
+ listenTime = (ccdelta - rfdelta - tfdelta) / clock_rate;
+
+ ath_print(common, ATH_DBG_ANI,
+ "cyclecount=%d, rfcount=%d, "
+ "tfcount=%d, listenTime=%d CLOCK_RATE=%d\n",
+ ccdelta, rfdelta, tfdelta, listenTime, clock_rate);
}
+
aniState->cycleCount = cycleCount;
aniState->txFrameCount = txFrameCount;
aniState->rxFrameCount = rxFrameCount;
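As a rough cross-check of the conversion above (numbers illustrative, not taken from the patch): the removed code divided by a hard-coded 44000, which is exactly what the new path computes for a 44 MHz clock, e.g.:

	clock_rate = 44 * 1000;		/* 44 MHz -> cycles per millisecond */
	/* 4,400,000 idle cycles / 44,000 = 100 ms of listen time */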
@@ -287,7 +591,7 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
return listenTime;
}
-void ath9k_ani_reset(struct ath_hw *ah)
+static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
{
struct ar5416AniState *aniState;
struct ath9k_channel *chan = ah->curchan;
@@ -340,7 +644,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
ah->curani->cckTrigLow =
ah->config.cck_trig_low;
}
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
return;
}
@@ -362,7 +666,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
~ATH9K_RX_FILTER_PHYERR);
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
ENABLE_REGWRITE_BUFFER(ah);
@@ -373,8 +677,102 @@ void ath9k_ani_reset(struct ath_hw *ah)
DISABLE_REGWRITE_BUFFER(ah);
}
-void ath9k_hw_ani_monitor(struct ath_hw *ah,
- struct ath9k_channel *chan)
+/*
+ * Restore the ANI parameters in the HAL and reset the statistics.
+ * This routine should be called for every hardware reset and for
+ * every channel change.
+ */
+static void ath9k_ani_reset_new(struct ath_hw *ah, bool is_scanning)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath9k_channel *chan = ah->curchan;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!DO_ANI(ah))
+ return;
+
+ BUG_ON(aniState == NULL);
+ ah->stats.ast_ani_reset++;
+
+ /* only allow a subset of functions in AP mode */
+ if (ah->opmode == NL80211_IFTYPE_AP) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
+ ATH9K_ANI_FIRSTEP_LEVEL);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani_function |= ATH9K_ANI_MRC_CCK;
+ } else
+ ah->ani_function = 0;
+ }
+
+ /* always allow mode (on/off) to be controlled */
+ ah->ani_function |= ATH9K_ANI_MODE;
+
+ if (is_scanning ||
+ (ah->opmode != NL80211_IFTYPE_STATION &&
+ ah->opmode != NL80211_IFTYPE_ADHOC)) {
+ /*
+ * If we're scanning or in AP mode, the defaults (ini)
+ * should be in place. For an AP we assume the historical
+ * levels for this channel are probably outdated so start
+ * from defaults instead.
+ */
+ if (aniState->ofdmNoiseImmunityLevel !=
+ ATH9K_ANI_OFDM_DEF_LEVEL ||
+ aniState->cckNoiseImmunityLevel !=
+ ATH9K_ANI_CCK_DEF_LEVEL) {
+ ath_print(common, ATH_DBG_ANI,
+ "Restore defaults: opmode %u "
+ "chan %d Mhz/0x%x is_scanning=%d "
+ "ofdm:%d cck:%d\n",
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags,
+ is_scanning,
+ aniState->ofdmNoiseImmunityLevel,
+ aniState->cckNoiseImmunityLevel);
+
+ ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
+ ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
+ }
+ } else {
+ /*
+ * restore historical levels for this channel
+ */
+ ath_print(common, ATH_DBG_ANI,
+ "Restore history: opmode %u "
+ "chan %d Mhz/0x%x is_scanning=%d "
+ "ofdm:%d cck:%d\n",
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags,
+ is_scanning,
+ aniState->ofdmNoiseImmunityLevel,
+ aniState->cckNoiseImmunityLevel);
+
+ ath9k_hw_set_ofdm_nil(ah,
+ aniState->ofdmNoiseImmunityLevel);
+ ath9k_hw_set_cck_nil(ah,
+ aniState->cckNoiseImmunityLevel);
+ }
+
+ /*
+ * enable phy counters if hw supports or if not, enable phy
+ * interrupts (so we can count each one)
+ */
+ ath9k_ani_restart_new(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static void ath9k_hw_ani_monitor_old(struct ath_hw *ah,
+ struct ath9k_channel *chan)
{
struct ar5416AniState *aniState;
struct ath_common *common = ath9k_hw_common(ah);
@@ -390,7 +788,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
listenTime = ath9k_hw_ani_get_listen_time(ah);
if (listenTime < 0) {
ah->stats.ast_ani_lneg++;
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
return;
}
@@ -444,21 +842,166 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
aniState->cckPhyErrCount <= aniState->listenTime *
aniState->cckTrigLow / 1000)
ath9k_hw_ani_lower_immunity(ah);
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
} else if (aniState->listenTime > ah->aniperiod) {
if (aniState->ofdmPhyErrCount > aniState->listenTime *
aniState->ofdmTrigHigh / 1000) {
- ath9k_hw_ani_ofdm_err_trigger(ah);
- ath9k_ani_restart(ah);
+ ath9k_hw_ani_ofdm_err_trigger_old(ah);
+ ath9k_ani_restart_old(ah);
} else if (aniState->cckPhyErrCount >
aniState->listenTime * aniState->cckTrigHigh /
1000) {
- ath9k_hw_ani_cck_err_trigger(ah);
- ath9k_ani_restart(ah);
+ ath9k_hw_ani_cck_err_trigger_old(ah);
+ ath9k_ani_restart_old(ah);
+ }
+ }
+}
+
+static void ath9k_hw_ani_monitor_new(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int32_t listenTime;
+ u32 phyCnt1, phyCnt2;
+ u32 ofdmPhyErrCnt, cckPhyErrCnt;
+ u32 ofdmPhyErrRate, cckPhyErrRate;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ah->curani;
+ if (WARN_ON(!aniState))
+ return;
+
+ listenTime = ath9k_hw_ani_get_listen_time(ah);
+ if (listenTime <= 0) {
+ ah->stats.ast_ani_lneg++;
+ /* restart ANI period if listenTime is invalid */
+ ath_print(common, ATH_DBG_ANI,
+ "listenTime=%d - on new ani monitor\n",
+ listenTime);
+ ath9k_ani_restart_new(ah);
+ return;
+ }
+
+ aniState->listenTime += listenTime;
+
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+ phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+
+ if (phyCnt1 < aniState->ofdmPhyErrBase ||
+ phyCnt2 < aniState->cckPhyErrBase) {
+ if (phyCnt1 < aniState->ofdmPhyErrBase) {
+ ath_print(common, ATH_DBG_ANI,
+ "phyCnt1 0x%x, resetting "
+ "counter value to 0x%x\n",
+ phyCnt1,
+ aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_1,
+ aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1,
+ AR_PHY_ERR_OFDM_TIMING);
+ }
+ if (phyCnt2 < aniState->cckPhyErrBase) {
+ ath_print(common, ATH_DBG_ANI,
+ "phyCnt2 0x%x, resetting "
+ "counter value to 0x%x\n",
+ phyCnt2,
+ aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_2,
+ aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2,
+ AR_PHY_ERR_CCK_TIMING);
+ }
+ return;
+ }
+
+ ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
+ ah->stats.ast_ani_ofdmerrs +=
+ ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
+ aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
+
+ cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
+ ah->stats.ast_ani_cckerrs +=
+ cckPhyErrCnt - aniState->cckPhyErrCount;
+ aniState->cckPhyErrCount = cckPhyErrCnt;
+
+ ath_print(common, ATH_DBG_ANI,
+ "Errors: OFDM=0x%08x-0x%08x=%d "
+ "CCK=0x%08x-0x%08x=%d\n",
+ phyCnt1,
+ aniState->ofdmPhyErrBase,
+ ofdmPhyErrCnt,
+ phyCnt2,
+ aniState->cckPhyErrBase,
+ cckPhyErrCnt);
+
+ ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 /
+ aniState->listenTime;
+ cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
+ aniState->listenTime;
+
+ ath_print(common, ATH_DBG_ANI,
+ "listenTime=%d OFDM:%d errs=%d/s CCK:%d "
+ "errs=%d/s ofdm_turn=%d\n",
+ listenTime, aniState->ofdmNoiseImmunityLevel,
+ ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
+ cckPhyErrRate, aniState->ofdmsTurn);
+
+ if (aniState->listenTime > 5 * ah->aniperiod) {
+ if (ofdmPhyErrRate <= aniState->ofdmTrigLow &&
+ cckPhyErrRate <= aniState->cckTrigLow) {
+ ath_print(common, ATH_DBG_ANI,
+ "1. listenTime=%d OFDM:%d errs=%d/s(<%d) "
+ "CCK:%d errs=%d/s(<%d) -> "
+ "ath9k_hw_ani_lower_immunity()\n",
+ aniState->listenTime,
+ aniState->ofdmNoiseImmunityLevel,
+ ofdmPhyErrRate,
+ aniState->ofdmTrigLow,
+ aniState->cckNoiseImmunityLevel,
+ cckPhyErrRate,
+ aniState->cckTrigLow);
+ ath9k_hw_ani_lower_immunity(ah);
+ aniState->ofdmsTurn = !aniState->ofdmsTurn;
+ }
+ ath_print(common, ATH_DBG_ANI,
+ "1 listenTime=%d ofdm=%d/s cck=%d/s - "
+ "calling ath9k_ani_restart_new()\n",
+ aniState->listenTime, ofdmPhyErrRate, cckPhyErrRate);
+ ath9k_ani_restart_new(ah);
+ } else if (aniState->listenTime > ah->aniperiod) {
+ /* check to see if need to raise immunity */
+ if (ofdmPhyErrRate > aniState->ofdmTrigHigh &&
+ (cckPhyErrRate <= aniState->cckTrigHigh ||
+ aniState->ofdmsTurn)) {
+ ath_print(common, ATH_DBG_ANI,
+ "2 listenTime=%d OFDM:%d errs=%d/s(>%d) -> "
+ "ath9k_hw_ani_ofdm_err_trigger_new()\n",
+ aniState->listenTime,
+ aniState->ofdmNoiseImmunityLevel,
+ ofdmPhyErrRate,
+ aniState->ofdmTrigHigh);
+ ath9k_hw_ani_ofdm_err_trigger_new(ah);
+ ath9k_ani_restart_new(ah);
+ aniState->ofdmsTurn = false;
+ } else if (cckPhyErrRate > aniState->cckTrigHigh) {
+ ath_print(common, ATH_DBG_ANI,
+ "3 listenTime=%d CCK:%d errs=%d/s(>%d) -> "
+ "ath9k_hw_ani_cck_err_trigger_new()\n",
+ aniState->listenTime,
+ aniState->cckNoiseImmunityLevel,
+ cckPhyErrRate,
+ aniState->cckTrigHigh);
+ ath9k_hw_ani_cck_err_trigger_new(ah);
+ ath9k_ani_restart_new(ah);
+ aniState->ofdmsTurn = true;
}
}
}
-EXPORT_SYMBOL(ath9k_hw_ani_monitor);
void ath9k_enable_mib_counters(struct ath_hw *ah)
{
@@ -543,7 +1086,7 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
* any of the MIB counters overflow/trigger so don't assume we're
* here because a PHY error counter triggered.
*/
-void ath9k_hw_procmibevent(struct ath_hw *ah)
+static void ath9k_hw_proc_mib_event_old(struct ath_hw *ah)
{
u32 phyCnt1, phyCnt2;
@@ -556,8 +1099,15 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
/* Clear the mib counters and save them in the stats */
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
- if (!DO_ANI(ah))
+ if (!DO_ANI(ah)) {
+ /*
+ * We must always clear the interrupt cause by
+ * resetting the phy error regs.
+ */
+ REG_WRITE(ah, AR_PHY_ERR_1, 0);
+ REG_WRITE(ah, AR_PHY_ERR_2, 0);
return;
+ }
/* NB: these are not reset-on-read */
phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
@@ -585,14 +1135,51 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
* check will never be true.
*/
if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
- ath9k_hw_ani_ofdm_err_trigger(ah);
+ ath9k_hw_ani_ofdm_err_trigger_old(ah);
if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
- ath9k_hw_ani_cck_err_trigger(ah);
+ ath9k_hw_ani_cck_err_trigger_old(ah);
/* NB: always restart to insure the h/w counters are reset */
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
}
}
-EXPORT_SYMBOL(ath9k_hw_procmibevent);
+
+/*
+ * Process a MIB interrupt. We may potentially be invoked because
+ * any of the MIB counters overflow/trigger so don't assume we're
+ * here because a PHY error counter triggered.
+ */
+static void ath9k_hw_proc_mib_event_new(struct ath_hw *ah)
+{
+ u32 phyCnt1, phyCnt2;
+
+ /* Reset these counters regardless */
+ REG_WRITE(ah, AR_FILT_OFDM, 0);
+ REG_WRITE(ah, AR_FILT_CCK, 0);
+ if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
+ REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
+
+ /* Clear the mib counters and save them in the stats */
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ if (!DO_ANI(ah)) {
+ /*
+ * We must always clear the interrupt cause by
+ * resetting the phy error regs.
+ */
+ REG_WRITE(ah, AR_PHY_ERR_1, 0);
+ REG_WRITE(ah, AR_PHY_ERR_2, 0);
+ return;
+ }
+
+ /* NB: these are not reset-on-read */
+ phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+ phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+
+ /* NB: always restart to insure the h/w counters are reset */
+ if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
+ ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK))
+ ath9k_ani_restart_new(ah);
+}
void ath9k_hw_ani_setup(struct ath_hw *ah)
{
@@ -620,22 +1207,70 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
memset(ah->ani, 0, sizeof(ah->ani));
for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
- ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
- ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
- ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
- ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
+ if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) {
+ ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
+ ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
+
+ ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_NEW;
+ ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_NEW;
+
+ ah->ani[i].spurImmunityLevel =
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+
+ ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+
+ ah->ani[i].ofdmPhyErrBase = 0;
+ ah->ani[i].cckPhyErrBase = 0;
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani[i].mrcCCKOff =
+ !ATH9K_ANI_ENABLE_MRC_CCK;
+ else
+ ah->ani[i].mrcCCKOff = true;
+
+ ah->ani[i].ofdmsTurn = true;
+ } else {
+ ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+ ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
+
+ ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+ ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_OLD;
+
+ ah->ani[i].spurImmunityLevel =
+ ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
+ ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
+
+ ah->ani[i].ofdmPhyErrBase =
+ AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+ ah->ani[i].cckPhyErrBase =
+ AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+ ah->ani[i].cckWeakSigThreshold =
+ ATH9K_ANI_CCK_WEAK_SIG_THR;
+ }
+
ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
ah->ani[i].ofdmWeakSigDetectOff =
!ATH9K_ANI_USE_OFDM_WEAK_SIG;
- ah->ani[i].cckWeakSigThreshold =
- ATH9K_ANI_CCK_WEAK_SIG_THR;
- ah->ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
- ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
- ah->ani[i].ofdmPhyErrBase =
- AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
- ah->ani[i].cckPhyErrBase =
- AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
+ ah->ani[i].cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+ }
+
+ /*
+ * since we expect some ongoing maintenance on the tables, let's sanity
+ * check here that the default level does not modify the INI settings.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) {
+ const struct ani_ofdm_level_entry *entry_ofdm;
+ const struct ani_cck_level_entry *entry_cck;
+
+ entry_ofdm = &ofdm_level_table[ATH9K_ANI_OFDM_DEF_LEVEL];
+ entry_cck = &cck_level_table[ATH9K_ANI_CCK_DEF_LEVEL];
+
+ ah->aniperiod = ATH9K_ANI_PERIOD_NEW;
+ ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
+ } else {
+ ah->aniperiod = ATH9K_ANI_PERIOD_OLD;
+ ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
}
ath_print(common, ATH_DBG_ANI,
@@ -654,7 +1289,34 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath9k_enable_mib_counters(ah);
- ah->aniperiod = ATH9K_ANI_PERIOD;
if (ah->config.enable_ani)
ah->proc_phyerr |= HAL_PROCESS_ANI;
}
+
+void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->ani_reset = ath9k_ani_reset_old;
+ priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_old;
+
+ ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_old;
+ ops->ani_monitor = ath9k_hw_ani_monitor_old;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v1\n");
+}
+
+void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->ani_reset = ath9k_ani_reset_new;
+ priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_new;
+
+ ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_new;
+ ops->ani_monitor = ath9k_hw_ani_monitor_new;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v2\n");
+}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 3356762..f4d0a4d 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -23,23 +23,55 @@
#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
-#define ATH9K_ANI_OFDM_TRIG_HIGH 500
-#define ATH9K_ANI_OFDM_TRIG_LOW 200
-#define ATH9K_ANI_CCK_TRIG_HIGH 200
-#define ATH9K_ANI_CCK_TRIG_LOW 100
+/* units are errors per second */
+#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
+#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 1000
+
+/* units are errors per second */
+#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
+#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
+
+/* units are errors per second */
+#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
+#define ATH9K_ANI_CCK_TRIG_HIGH_NEW 600
+
+/* units are errors per second */
+#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
+#define ATH9K_ANI_CCK_TRIG_LOW_NEW 300
+
#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
#define ATH9K_ANI_CCK_WEAK_SIG_THR false
-#define ATH9K_ANI_SPUR_IMMUNE_LVL 7
-#define ATH9K_ANI_FIRSTEP_LVL 0
+
+#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD 7
+#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW 3
+
+#define ATH9K_ANI_FIRSTEP_LVL_OLD 0
+#define ATH9K_ANI_FIRSTEP_LVL_NEW 2
+
#define ATH9K_ANI_RSSI_THR_HIGH 40
#define ATH9K_ANI_RSSI_THR_LOW 7
-#define ATH9K_ANI_PERIOD 100
+
+#define ATH9K_ANI_PERIOD_OLD 100
+#define ATH9K_ANI_PERIOD_NEW 1000
+
+/* in ms */
+#define ATH9K_ANI_POLLINTERVAL_OLD 100
+#define ATH9K_ANI_POLLINTERVAL_NEW 1000
#define HAL_NOISE_IMMUNE_MAX 4
#define HAL_SPUR_IMMUNE_MAX 7
#define HAL_FIRST_STEP_MAX 2
+#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0
+#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20
+#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
+#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22
+
+#define ATH9K_ANI_ENABLE_MRC_CCK true
+
+/* values here are relative to the INI */
+
enum ath9k_ani_cmd {
ATH9K_ANI_PRESENT = 0x1,
ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
@@ -49,7 +81,8 @@ enum ath9k_ani_cmd {
ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
ATH9K_ANI_MODE = 0x40,
ATH9K_ANI_PHYERR_RESET = 0x80,
- ATH9K_ANI_ALL = 0xff
+ ATH9K_ANI_MRC_CCK = 0x100,
+ ATH9K_ANI_ALL = 0xfff
};
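The command values double as a bitmask: the per-family ani_control handlers switch on (cmd & ah->ani_function), so clearing a bit in ah->ani_function vetoes that particular control. A minimal sketch of the gating, with an assumed mask value:

	/* Assumed example mask: allow everything except the MRC CCK control. */
	ah->ani_function = ATH9K_ANI_ALL & ~ATH9K_ANI_MRC_CCK;

	/*
	 * A later ani_control(ah, ATH9K_ANI_MRC_CCK, ...) request then masks
	 * to 0, matches no case label and is rejected via the default branch.
	 */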
struct ath9k_mib_stats {
@@ -60,9 +93,31 @@ struct ath9k_mib_stats {
u32 beacons;
};
+/* INI default values for ANI registers */
+struct ath9k_ani_default {
+ u16 m1ThreshLow;
+ u16 m2ThreshLow;
+ u16 m1Thresh;
+ u16 m2Thresh;
+ u16 m2CountThr;
+ u16 m2CountThrLow;
+ u16 m1ThreshLowExt;
+ u16 m2ThreshLowExt;
+ u16 m1ThreshExt;
+ u16 m2ThreshExt;
+ u16 firstep;
+ u16 firstepLow;
+ u16 cycpwrThr1;
+ u16 cycpwrThr1Ext;
+};
+
struct ar5416AniState {
struct ath9k_channel *c;
u8 noiseImmunityLevel;
+ u8 ofdmNoiseImmunityLevel;
+ u8 cckNoiseImmunityLevel;
+ bool ofdmsTurn;
+ u8 mrcCCKOff;
u8 spurImmunityLevel;
u8 firstepLevel;
u8 ofdmWeakSigDetectOff;
@@ -85,6 +140,7 @@ struct ar5416AniState {
int16_t pktRssi[2];
int16_t ofdmErrRssi[2];
int16_t cckErrRssi[2];
+ struct ath9k_ani_default iniDef;
};
struct ar5416Stats {
@@ -108,15 +164,13 @@ struct ar5416Stats {
};
#define ah_mibStats stats.ast_mibstats
-void ath9k_ani_reset(struct ath_hw *ah);
-void ath9k_hw_ani_monitor(struct ath_hw *ah,
- struct ath9k_channel *chan);
void ath9k_enable_mib_counters(struct ath_hw *ah);
void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
u32 *rxf_pcnt, u32 *txf_pcnt);
-void ath9k_hw_procmibevent(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
+int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
+ struct ath9k_channel *chan);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 96018d5..ee34a49 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -19,7 +19,30 @@
#include "../regd.h"
#include "ar9002_phy.h"
-/* All code below is for non single-chip solutions */
+/* All code below is for AR5008, AR9001, AR9002 */
+
+static const int firstep_table[] =
+/* level: 0 1 2 3 4 5 6 7 8 */
+ { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
+
+static const int cycpwrThr1_table[] =
+/* level:  0   1   2   3   4   5   6   7  */
+ { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
+
+/*
+ * register values to turn OFDM weak signal detection OFF
+ */
+static const int m1ThreshLow_off = 127;
+static const int m2ThreshLow_off = 127;
+static const int m1Thresh_off = 127;
+static const int m2Thresh_off = 127;
+static const int m2CountThr_off = 31;
+static const int m2CountThrLow_off = 63;
+static const int m1ThreshLowExt_off = 127;
+static const int m2ThreshLowExt_off = 127;
+static const int m1ThreshExt_off = 127;
+static const int m2ThreshExt_off = 127;
+
/**
* ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
@@ -1026,8 +1049,9 @@ static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
return pll;
}
-static bool ar5008_hw_ani_control(struct ath_hw *ah,
- enum ath9k_ani_cmd cmd, int param)
+static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd,
+ int param)
{
struct ar5416AniState *aniState = ah->curani;
struct ath_common *common = ath9k_hw_common(ah);
@@ -1209,6 +1233,265 @@ static bool ar5008_hw_ani_control(struct ath_hw *ah,
return true;
}
+static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd,
+ int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ s32 value, value2;
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ /*
+ * on == 1 means ofdm weak signal detection is ON
+ * on == 1 is the default, for less noise immunity
+ *
+		 * on == 0 means ofdm weak signal detection is OFF
+		 * on == 0 means more noise immunity
+ */
+ u32 on = param ? 1 : 0;
+ /*
+ * make register setting for default
+ * (weak sig detect ON) come from INI file
+ */
+ int m1ThreshLow = on ?
+ aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+ int m2ThreshLow = on ?
+ aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+ int m1Thresh = on ?
+ aniState->iniDef.m1Thresh : m1Thresh_off;
+ int m2Thresh = on ?
+ aniState->iniDef.m2Thresh : m2Thresh_off;
+ int m2CountThr = on ?
+ aniState->iniDef.m2CountThr : m2CountThr_off;
+ int m2CountThrLow = on ?
+ aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+ int m1ThreshLowExt = on ?
+ aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+ int m2ThreshLowExt = on ?
+ aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+ int m1ThreshExt = on ?
+ aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+ int m2ThreshExt = on ?
+ aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: ofdm weak signal: %s=>%s\n",
+ chan->channel,
+ !aniState->ofdmWeakSigDetectOff ?
+ "on" : "off",
+ on ? "on" : "off");
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep_table)) {
+ ath_print(common, ATH_DBG_ANI,
+ "ATH9K_ANI_FIRSTEP_LEVEL: level "
+ "out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep_table));
+ return false;
+ }
+
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ aniState->iniDef.firstep;
+ if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ value);
+ /*
+ * we need to set first step low register too
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ aniState->iniDef.firstepLow;
+ if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
+
+ if (level != aniState->firstepLevel) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "firstep[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value,
+ aniState->iniDef.firstep);
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "firstep_low[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value2,
+ aniState->iniDef.firstepLow);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ }
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
+ ath_print(common, ATH_DBG_ANI,
+ "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
+ "out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1_table));
+ return false;
+ }
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ aniState->iniDef.cycpwrThr1;
+ if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ value);
+
+ /*
+ * set AR_PHY_EXT_CCA for extension channel
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ aniState->iniDef.cycpwrThr1Ext;
+ if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
+
+ if (level != aniState->spurImmunityLevel) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "cycpwrThr1[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value,
+ aniState->iniDef.cycpwrThr1);
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "cycpwrThr1Ext[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value2,
+ aniState->iniDef.cycpwrThr1Ext);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ }
+ break;
+ }
+ case ATH9K_ANI_MRC_CCK:
+ /*
+		 * You should not see this, as AR5008, AR9001 and AR9002
+		 * do not have hardware support for MRC CCK.
+ */
+ WARN_ON(1);
+ break;
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI,
+ "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
+ "MRCcck=%s listenTime=%d CC=%d listen=%d "
+ "ofdmErrs=%d cckErrs=%d\n",
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+ aniState->firstepLevel,
+ !aniState->mrcCCKOff ? "on" : "off",
+ aniState->listenTime,
+ aniState->cycleCount,
+ aniState->listenTime,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+ return true;
+}
+
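The firstep and spur-immunity cases above apply the same pattern: take the table delta between the requested level and the default level, add it to the baseline cached from the INI, and clamp to the allowed register range. A minimal sketch of that computation, using an assumed INI baseline rather than a value read from real hardware:

	static int ani_relative_setting(const int *table, int level, int def_level,
					int ini_baseline, int min, int max)
	{
		int value = table[level] - table[def_level] + ini_baseline;

		if (value < min)
			value = min;
		if (value > max)
			value = max;
		return value;
	}

	/*
	 * e.g. firstep level 4 with an assumed iniDef.firstep of 6:
	 * firstep_table[4] - firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + 6
	 * = 4 - 0 + 6 = 10, which already lies inside
	 * [ATH9K_SIG_FIRSTEP_SETTING_MIN, ATH9K_SIG_FIRSTEP_SETTING_MAX] = [0, 20]
	 * and is what would be written to AR_PHY_FIND_SIG_FIRSTEP.
	 */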
static void ar5008_hw_do_getnf(struct ath_hw *ah,
int16_t nfarray[NUM_NF_READINGS])
{
@@ -1329,6 +1612,71 @@ static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
DISABLE_REGWRITE_BUFFER(ah);
}
+/*
+ * Initialize the ANI register values with default (ini) values.
+ * This routine is called during a (full) hardware reset after
+ * all the registers are initialised from the INI.
+ */
+static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ath9k_ani_default *iniDef;
+ int index;
+ u32 val;
+
+ index = ath9k_hw_get_ani_channel_idx(ah, chan);
+ aniState = &ah->ani[index];
+ ah->curani = aniState;
+ iniDef = &aniState->iniDef;
+
+ ath_print(common, ATH_DBG_ANI,
+		  "ver %d.%d opmode %u chan %d MHz/0x%x\n",
+ ah->hw_version.macVersion,
+ ah->hw_version.macRev,
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags);
+
+ val = REG_READ(ah, AR_PHY_SFCORR);
+ iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
+ iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
+ iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_LOW);
+ iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
+ iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
+ iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_EXT);
+ iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
+ iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
+ iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
+ iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
+ iniDef->firstep = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP);
+ iniDef->firstepLow = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_FIRSTEP_LOW);
+ iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
+ AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1);
+ iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_TIMING5_CYCPWR_THR1);
+
+ /* these levels just got reset to defaults by the INI */
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+ aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->mrcCCKOff = true; /* not available on pre AR9003 */
+
+ aniState->cycleCount = 0;
+}
+
+
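The caching routine above relies on the usual ath9k mask/shift convention: each AR_PHY_* field define has a companion _S shift, and MS()/REG_READ_FIELD() pull the field out of the raw register word. As a rough paraphrase of that helper (not a new definition):

	/* Paraphrase of the ath MS() helper: mask the field, then shift it down. */
	#define EXAMPLE_MS(val, field)	(((val) & (field)) >> field##_S)

	/*
	 * With AR_PHY_FIND_SIG_FIRSTEP_LOW = 0x00000FC0 and
	 * AR_PHY_FIND_SIG_FIRSTEP_LOW_S = 6, a raw register value of 0x180
	 * yields (0x180 & 0xFC0) >> 6 = 6, which is what ends up in
	 * iniDef->firstepLow.
	 */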
void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1350,10 +1698,15 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
priv_ops->restore_chainmask = ar5008_restore_chainmask;
priv_ops->set_diversity = ar5008_set_diversity;
- priv_ops->ani_control = ar5008_hw_ani_control;
priv_ops->do_getnf = ar5008_hw_do_getnf;
priv_ops->loadnf = ar5008_hw_loadnf;
+ if (modparam_force_new_ani) {
+ priv_ops->ani_control = ar5008_hw_ani_control_new;
+ priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
+ } else
+ priv_ops->ani_control = ar5008_hw_ani_control_old;
+
if (AR_SREV_9100(ah))
priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
else if (AR_SREV_9160_10_OR_LATER(ah))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 7ba9dd6..0317ac9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -20,6 +20,10 @@
#include "ar9002_initvals.h"
#include "ar9002_phy.h"
+int modparam_force_new_ani;
+module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
+MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
+
 /* General hardware code for the AR5008/AR9001/AR9002 hardware families */
static bool ar9002_hw_macversion_supported(u32 macversion)
@@ -636,4 +640,9 @@ void ar9002_hw_attach_ops(struct ath_hw *ah)
ar9002_hw_attach_calib_ops(ah);
ar9002_hw_attach_mac_ops(ah);
+
+ if (modparam_force_new_ani)
+ ath9k_hw_attach_ani_ops_new(ah);
+ else
+ ath9k_hw_attach_ani_ops_old(ah);
}
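Since the parameter is registered with 0444 permissions it can only be set at load time; assuming ar9002_hw.c continues to be built into the ath9k_hw module, forcing the new ANI on AR5008/AR9001/AR9002 parts would look like loading with force_new_ani=1 (for example ath9k_hw.force_new_ani=1 on the kernel command line).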
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 81bf6e5..ce8bb00 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -114,6 +114,10 @@
#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
#define AR_PHY_FIND_SIG_FIRPWR_S 18
+#define AR_PHY_FIND_SIG_LOW 0x9840
+#define AR_PHY_FIND_SIG_FIRSTEP_LOW 0x00000FC0L
+#define AR_PHY_FIND_SIG_FIRSTEP_LOW_S 6
+
#define AR_PHY_AGC_CTL1 0x985C
#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
@@ -325,6 +329,9 @@
#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_TIMING5_CYCPWR_THR1 0x0000FE00L
+#define AR_PHY_EXT_TIMING5_CYCPWR_THR1_S 9
+
#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
#define AR_PHY_EXT_MINCCA_PWR_S 23
#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
index f82a00d..d3375fc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
@@ -835,71 +835,71 @@ static const u32 ar9300_2p0_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
- {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
- {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,71 +913,71 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
- {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
- {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 7451505..ec98ab5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -835,71 +835,71 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
- {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
- {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,71 +913,71 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
- {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
- {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 23eb60e..343c9a4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -67,6 +67,7 @@ static const struct ar9300_eeprom ar9300_default = {
* bit2 - enable fastClock - enabled
* bit3 - enable doubling - enabled
* bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
*/
.miscConfiguration = 0, /* bit0 - turn down drivestrength */
.eepromWriteEnableGpio = 3,
@@ -129,9 +130,11 @@ static const struct ar9300_eeprom ar9300_default = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
- .futureModal = { /* [32] */
+ .papdRateMaskHt20 = LE32(0x80c080),
+ .papdRateMaskHt40 = LE32(0x80c080),
+ .futureModal = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0, 0, 0, 0
},
},
.calFreqPier2G = {
@@ -326,9 +329,11 @@ static const struct ar9300_eeprom ar9300_default = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0xf0e0e0),
+ .papdRateMaskHt40 = LE32(0xf0e0e0),
.futureModal = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0, 0, 0, 0
},
},
.calFreqPier5G = {
@@ -644,6 +649,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return (pBase->featureEnable & 0x10) >> 4;
case EEP_SWREG:
return le32_to_cpu(pBase->swreg);
+ case EEP_PAPRD:
+ return !!(pBase->featureEnable & BIT(5));
default:
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 23fb353..3c533bb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -234,7 +234,9 @@ struct ar9300_modal_eep_header {
u8 txEndToRxOn;
u8 txFrameToXpaOn;
u8 thresh62;
- u8 futureModal[32];
+ __le32 papdRateMaskHt20;
+ __le32 papdRateMaskHt40;
+ u8 futureModal[24];
} __packed;
struct ar9300_cal_data_per_freq_op_loop {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 863f61e..0641689 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -295,6 +295,26 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
 	/* Several PCIe messages to ensure proper behaviour */
if (ah->config.pcie_waen)
REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
+ else
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ }
+
+ /*
+	 * Configure PCIe after INI init. SERDES values now come from the INI
+	 * file. This enables PCIe low power mode.
+ */
+ if (ah->config.pcieSerDesWrite) {
+ unsigned int i;
+ struct ar5416IniArray *array;
+
+ array = power_off ? &ah->iniPcieSerdes :
+ &ah->iniPcieSerdesLowPower;
+
+ for (i = 0; i < array->ia_rows; i++) {
+ REG_WRITE(ah,
+ INI_RA(array, i, 0),
+ INI_RA(array, i, 1));
+ }
}
}
@@ -313,4 +333,6 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
ar9003_hw_attach_phy_ops(ah);
ar9003_hw_attach_calib_ops(ah);
ar9003_hw_attach_mac_ops(ah);
+
+ ath9k_hw_attach_ani_ops_new(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 4073107..06ef710 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -470,6 +470,14 @@ static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
ads->ctl11 &= ~AR_VirtMoreFrag;
}
+void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains)
+{
+ struct ar9003_txc *ads = ds;
+
+ ads->ctl12 |= SM(chains, AR_PAPRDChainMask);
+}
+EXPORT_SYMBOL(ar9003_hw_set_paprd_txdesc);
+
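ar9003_hw_set_paprd_txdesc() uses the generic ath SM() shift-and-mask helper, with the ctl12 field defines added in the ar9003_mac.h hunk below; as a rough sketch of the value that gets OR-ed into the descriptor:

	/*
	 * SM(v, f) expands to ((v << f##_S) & f) in the ath headers, so
	 * enabling PAPRD on all three chains (chains = 0x7) contributes
	 * (0x7 << AR_PAPRDChainMask_S) & AR_PAPRDChainMask
	 * = (0x7 << 9) & 0x00000e00 = 0x00000e00 to ads->ctl12.
	 */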
void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
{
struct ath_hw_ops *ops = ath9k_hw_ops(hw);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index 5a7a286..f76f27d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -40,6 +40,10 @@
#define AR_Not_Sounding 0x20000000
+/* ctl 12 */
+#define AR_PAPRDChainMask 0x00000e00
+#define AR_PAPRDChainMask_S 9
+
#define MAP_ISR_S2_CST 6
#define MAP_ISR_S2_GTT 6
#define MAP_ISR_S2_TIM 3
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
new file mode 100644
index 0000000..49e0c86
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+
+void ar9003_paprd_enable(struct ath_hw *ah, bool val)
+{
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+}
+EXPORT_SYMBOL(ar9003_paprd_enable);
+
+static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_modal_eep_header *hdr;
+ const u32 ctrl0[3] = {
+ AR_PHY_PAPRD_CTRL0_B0,
+ AR_PHY_PAPRD_CTRL0_B1,
+ AR_PHY_PAPRD_CTRL0_B2
+ };
+ const u32 ctrl1[3] = {
+ AR_PHY_PAPRD_CTRL1_B0,
+ AR_PHY_PAPRD_CTRL1_B1,
+ AR_PHY_PAPRD_CTRL1_B2
+ };
+ u32 am_mask, ht40_mask;
+ int i;
+
+ if (ah->curchan && IS_CHAN_5GHZ(ah->curchan))
+ hdr = &eep->modalHeader5G;
+ else
+ hdr = &eep->modalHeader2G;
+
+ am_mask = le32_to_cpu(hdr->papdRateMaskHt20);
+ ht40_mask = le32_to_cpu(hdr->papdRateMaskHt40);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK, am_mask);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK, am_mask);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, ht40_mask);
+
+ for (i = 0; i < 3; i++) {
+ REG_RMW_FIELD(ah, ctrl0[i],
+ AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK, 181);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT, 361);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
+ REG_RMW_FIELD(ah, ctrl0[i],
+ AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH, 3);
+ }
+
+ ar9003_paprd_enable(ah, false);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
+ AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, 147);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
+ -15);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES,
+ 100);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 261376);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_1_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 248079);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_2_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 233759);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_3_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 220464);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_4_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 208194);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_5_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 196949);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_6_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 185706);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 175487);
+}
+
+static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
+{
+ u32 *entry = ah->paprd_gain_table_entries;
+ u8 *index = ah->paprd_gain_table_index;
+ u32 reg = AR_PHY_TXGAIN_TABLE;
+ int i;
+
+ memset(entry, 0, sizeof(ah->paprd_gain_table_entries));
+ memset(index, 0, sizeof(ah->paprd_gain_table_index));
+
+ for (i = 0; i < 32; i++) {
+ entry[i] = REG_READ(ah, reg);
+ index[i] = (entry[i] >> 24) & 0xff;
+ reg += 4;
+ }
+}
+
+static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
+ int target_power)
+{
+ int olpc_gain_delta = 0;
+ int alpha_therm, alpha_volt;
+ int therm_cal_value, volt_cal_value;
+ int therm_value, volt_value;
+ int thermal_gain_corr, voltage_gain_corr;
+ int desired_scale, desired_gain = 0;
+ u32 reg;
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+ desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12,
+ AR_PHY_TPC_12_DESIRED_SCALE_HT40_5);
+ alpha_therm = REG_READ_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM);
+ alpha_volt = REG_READ_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_VOLT);
+ therm_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
+ AR_PHY_TPC_18_THERM_CAL_VALUE);
+ volt_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
+ AR_PHY_TPC_18_VOLT_CAL_VALUE);
+ therm_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
+ AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE);
+ volt_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
+ AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE);
+
+ if (chain == 0)
+ reg = AR_PHY_TPC_11_B0;
+ else if (chain == 1)
+ reg = AR_PHY_TPC_11_B1;
+ else
+ reg = AR_PHY_TPC_11_B2;
+
+ olpc_gain_delta = REG_READ_FIELD(ah, reg,
+ AR_PHY_TPC_11_OLPC_GAIN_DELTA);
+
+ if (olpc_gain_delta >= 128)
+ olpc_gain_delta = olpc_gain_delta - 256;
+
+ thermal_gain_corr = (alpha_therm * (therm_value - therm_cal_value) +
+ (256 / 2)) / 256;
+ voltage_gain_corr = (alpha_volt * (volt_value - volt_cal_value) +
+ (128 / 2)) / 128;
+ desired_gain = target_power - olpc_gain_delta - thermal_gain_corr -
+ voltage_gain_corr + desired_scale;
+
+ return desired_gain;
+}
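Editor's note: the corrections computed above are rounded fixed-point scalings of the thermal/voltage deltas read from AR_PHY_TPC_18/19 and the thermal ADC. A standalone sketch of the same arithmetic, using made-up register readings (the variable values below are hypothetical, not from the patch):

#include <stdio.h>

int main(void)
{
	/* hypothetical readings standing in for the TPC/thermal ADC fields */
	int alpha_therm = 30, therm_value = 150, therm_cal_value = 140;
	int alpha_volt = 4, volt_value = 100, volt_cal_value = 98;
	int olpc_gain_delta = 10, desired_scale = 5, target_power = 52;

	/* rounded fixed-point scaling, as in ar9003_get_desired_gain() */
	int therm_corr = (alpha_therm * (therm_value - therm_cal_value) + 256 / 2) / 256;
	int volt_corr = (alpha_volt * (volt_value - volt_cal_value) + 128 / 2) / 128;
	int desired_gain = target_power - olpc_gain_delta - therm_corr -
			   volt_corr + desired_scale;

	/* prints therm_corr=1 volt_corr=0 desired_gain=46 for these inputs */
	printf("therm_corr=%d volt_corr=%d desired_gain=%d\n",
	       therm_corr, volt_corr, desired_gain);
	return 0;
}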
+
+static void ar9003_tx_force_gain(struct ath_hw *ah, unsigned int gain_index)
+{
+ int selected_gain_entry, txbb1dbgain, txbb6dbgain, txmxrgain;
+ int padrvgnA, padrvgnB, padrvgnC, padrvgnD;
+ u32 *gain_table_entries = ah->paprd_gain_table_entries;
+
+ selected_gain_entry = gain_table_entries[gain_index];
+ txbb1dbgain = selected_gain_entry & 0x7;
+ txbb6dbgain = (selected_gain_entry >> 3) & 0x3;
+ txmxrgain = (selected_gain_entry >> 5) & 0xf;
+ padrvgnA = (selected_gain_entry >> 9) & 0xf;
+ padrvgnB = (selected_gain_entry >> 13) & 0xf;
+ padrvgnC = (selected_gain_entry >> 17) & 0xf;
+ padrvgnD = (selected_gain_entry >> 21) & 0x3;
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN, txbb1dbgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN, txbb6dbgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN, txmxrgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA, padrvgnA);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB, padrvgnB);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC, padrvgnC);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND, padrvgnD);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCED_DAC_GAIN, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCE_DAC_GAIN, 0);
+}
+
+static inline int find_expn(int num)
+{
+ return fls(num) - 1;
+}
+
+static inline int find_proper_scale(int expn, int N)
+{
+ return (expn > N) ? expn - 10 : 0;
+}
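Editor's note: find_expn() returns the index of the most significant set bit, and find_proper_scale() turns that into an extra scaling step (every call site below passes N = 10, and note the body subtracts a fixed 10). A quick userspace sketch, with a local stand-in for the kernel's fls():

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int my_fls(unsigned int num)
{
	int i = 0;

	while (num) {
		num >>= 1;
		i++;
	}
	return i;
}

static int find_expn(int num)
{
	return my_fls(num) - 1;
}

static int find_proper_scale(int expn, int N)
{
	return (expn > N) ? expn - 10 : 0;
}

int main(void)
{
	/* 3000 has its MSB at bit 11, one step above 2^10, so the scale is 1 */
	printf("expn=%d scale=%d\n",
	       find_expn(3000), find_proper_scale(find_expn(3000), 10));
	return 0;
}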
+
+#define NUM_BIN 23
+
+static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
+{
+ unsigned int thresh_accum_cnt;
+ int x_est[NUM_BIN + 1], Y[NUM_BIN + 1], theta[NUM_BIN + 1];
+ int PA_in[NUM_BIN + 1];
+ int B1_tmp[NUM_BIN + 1], B2_tmp[NUM_BIN + 1];
+ unsigned int B1_abs_max, B2_abs_max;
+ int max_index, scale_factor;
+ int y_est[NUM_BIN + 1];
+ int x_est_fxp1_nonlin, x_tilde[NUM_BIN + 1];
+ unsigned int x_tilde_abs;
+ int G_fxp, Y_intercept, order_x_by_y, M, I, L, sum_y_sqr, sum_y_quad;
+ int Q_x, Q_B1, Q_B2, beta_raw, alpha_raw, scale_B;
+ int Q_scale_B, Q_beta, Q_alpha, alpha, beta, order_1, order_2;
+ int order1_5x, order2_3x, order1_5x_rem, order2_3x_rem;
+ int y5, y3, tmp;
+ int theta_low_bin = 0;
+ int i;
+
+ /* disregard any bin that contains <= 16 samples */
+ thresh_accum_cnt = 16;
+ scale_factor = 5;
+ max_index = 0;
+ memset(theta, 0, sizeof(theta));
+ memset(x_est, 0, sizeof(x_est));
+ memset(Y, 0, sizeof(Y));
+ memset(y_est, 0, sizeof(y_est));
+ memset(x_tilde, 0, sizeof(x_tilde));
+
+ for (i = 0; i < NUM_BIN; i++) {
+ s32 accum_cnt, accum_tx, accum_rx, accum_ang;
+
+ /* number of samples */
+ accum_cnt = data_L[i] & 0xffff;
+
+ if (accum_cnt <= thresh_accum_cnt)
+ continue;
+
+ /* sum(tx amplitude) */
+ accum_tx = ((data_L[i] >> 16) & 0xffff) |
+ ((data_U[i] & 0x7ff) << 16);
+
+ /* sum(rx amplitude distance to lower bin edge) */
+ accum_rx = ((data_U[i] >> 11) & 0x1f) |
+ ((data_L[i + 23] & 0xffff) << 5);
+
+ /* sum(angles) */
+ accum_ang = ((data_L[i + 23] >> 16) & 0xffff) |
+ ((data_U[i + 23] & 0x7ff) << 16);
+
+ accum_tx <<= scale_factor;
+ accum_rx <<= scale_factor;
+ x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
+ scale_factor;
+
+ Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
+ scale_factor) +
+ (1 << scale_factor) * max_index + 16;
+
+ if (accum_ang >= (1 << 26))
+ accum_ang -= 1 << 27;
+
+ theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) /
+ accum_cnt;
+
+ max_index++;
+ }
+
+ /*
+ * Find the average theta of the first 5 bins and set all of them to
+ * that value. The curve is linear over that range.
+ */
+ for (i = 1; i < 6; i++)
+ theta_low_bin += theta[i];
+
+ theta_low_bin = theta_low_bin / 5;
+ for (i = 1; i < 6; i++)
+ theta[i] = theta_low_bin;
+
+ /* Set values at origin */
+ theta[0] = theta_low_bin;
+ for (i = 0; i <= max_index; i++)
+ theta[i] -= theta_low_bin;
+
+ x_est[0] = 0;
+ Y[0] = 0;
+ scale_factor = 8;
+
+ /* low signal gain */
+ if (x_est[6] == x_est[3])
+ return false;
+
+ G_fxp =
+ (((Y[6] - Y[3]) * 1 << scale_factor) +
+ (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
+
+ Y_intercept =
+ (G_fxp * (x_est[0] - x_est[3]) +
+ (1 << scale_factor)) / (1 << scale_factor) + Y[3];
+
+ for (i = 0; i <= max_index; i++)
+ y_est[i] = Y[i] - Y_intercept;
+
+ for (i = 0; i <= 3; i++) {
+ y_est[i] = i * 32;
+
+ /* prevent division by zero */
+ if (G_fxp == 0)
+ return false;
+
+ x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
+ }
+
+ x_est_fxp1_nonlin =
+ x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
+ G_fxp) / G_fxp;
+
+ order_x_by_y =
+ (x_est_fxp1_nonlin + y_est[max_index]) / y_est[max_index];
+
+ if (order_x_by_y == 0)
+ M = 10;
+ else if (order_x_by_y == 1)
+ M = 9;
+ else
+ M = 8;
+
+ I = (max_index > 15) ? 7 : max_index >> 1;
+ L = max_index - I;
+ scale_factor = 8;
+ sum_y_sqr = 0;
+ sum_y_quad = 0;
+ x_tilde_abs = 0;
+
+ for (i = 0; i <= L; i++) {
+ unsigned int y_sqr;
+ unsigned int y_quad;
+ unsigned int tmp_abs;
+
+ /* prevent division by zero */
+ if (y_est[i + I] == 0)
+ return false;
+
+ x_est_fxp1_nonlin =
+ x_est[i + I] - ((1 << scale_factor) * y_est[i + I] +
+ G_fxp) / G_fxp;
+
+ x_tilde[i] =
+ (x_est_fxp1_nonlin * (1 << M) + y_est[i + I]) / y_est[i +
+ I];
+ x_tilde[i] =
+ (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
+ x_tilde[i] =
+ (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
+ y_sqr =
+ (y_est[i + I] * y_est[i + I] +
+ (scale_factor * scale_factor)) / (scale_factor *
+ scale_factor);
+ tmp_abs = abs(x_tilde[i]);
+ if (tmp_abs > x_tilde_abs)
+ x_tilde_abs = tmp_abs;
+
+ y_quad = y_sqr * y_sqr;
+ sum_y_sqr = sum_y_sqr + y_sqr;
+ sum_y_quad = sum_y_quad + y_quad;
+ B1_tmp[i] = y_sqr * (L + 1);
+ B2_tmp[i] = y_sqr;
+ }
+
+ B1_abs_max = 0;
+ B2_abs_max = 0;
+ for (i = 0; i <= L; i++) {
+ int abs_val;
+
+ B1_tmp[i] -= sum_y_sqr;
+ B2_tmp[i] = sum_y_quad - sum_y_sqr * B2_tmp[i];
+
+ abs_val = abs(B1_tmp[i]);
+ if (abs_val > B1_abs_max)
+ B1_abs_max = abs_val;
+
+ abs_val = abs(B2_tmp[i]);
+ if (abs_val > B2_abs_max)
+ B2_abs_max = abs_val;
+ }
+
+ Q_x = find_proper_scale(find_expn(x_tilde_abs), 10);
+ Q_B1 = find_proper_scale(find_expn(B1_abs_max), 10);
+ Q_B2 = find_proper_scale(find_expn(B2_abs_max), 10);
+
+ beta_raw = 0;
+ alpha_raw = 0;
+ for (i = 0; i <= L; i++) {
+ x_tilde[i] = x_tilde[i] / (1 << Q_x);
+ B1_tmp[i] = B1_tmp[i] / (1 << Q_B1);
+ B2_tmp[i] = B2_tmp[i] / (1 << Q_B2);
+ beta_raw = beta_raw + B1_tmp[i] * x_tilde[i];
+ alpha_raw = alpha_raw + B2_tmp[i] * x_tilde[i];
+ }
+
+ scale_B =
+ ((sum_y_quad / scale_factor) * (L + 1) -
+ (sum_y_sqr / scale_factor) * sum_y_sqr) * scale_factor;
+
+ Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
+ scale_B = scale_B / (1 << Q_scale_B);
+ Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
+ Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
+ beta_raw = beta_raw / (1 << Q_beta);
+ alpha_raw = alpha_raw / (1 << Q_alpha);
+ alpha = (alpha_raw << 10) / scale_B;
+ beta = (beta_raw << 10) / scale_B;
+ order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B;
+ order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B;
+ order1_5x = order_1 / 5;
+ order2_3x = order_2 / 3;
+ order1_5x_rem = order_1 - 5 * order1_5x;
+ order2_3x_rem = order_2 - 3 * order2_3x;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ tmp = i * 32;
+ y5 = ((beta * tmp) >> 6) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = y5 >> order1_5x_rem;
+ y3 = (alpha * tmp) >> order2_3x;
+ y3 = (y3 * tmp) >> order2_3x;
+ y3 = (y3 * tmp) >> order2_3x;
+ y3 = y3 >> order2_3x_rem;
+ PA_in[i] = y5 + y3 + (256 * tmp) / G_fxp;
+
+ if (i >= 2) {
+ tmp = PA_in[i] - PA_in[i - 1];
+ if (tmp < 0)
+ PA_in[i] =
+ PA_in[i - 1] + (PA_in[i - 1] -
+ PA_in[i - 2]);
+ }
+
+ PA_in[i] = (PA_in[i] < 1400) ? PA_in[i] : 1400;
+ }
+
+ beta_raw = 0;
+ alpha_raw = 0;
+
+ for (i = 0; i <= L; i++) {
+ int theta_tilde =
+ ((theta[i + I] << M) + y_est[i + I]) / y_est[i + I];
+ theta_tilde =
+ ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
+ theta_tilde =
+ ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
+ beta_raw = beta_raw + B1_tmp[i] * theta_tilde;
+ alpha_raw = alpha_raw + B2_tmp[i] * theta_tilde;
+ }
+
+ Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
+ Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
+ beta_raw = beta_raw / (1 << Q_beta);
+ alpha_raw = alpha_raw / (1 << Q_alpha);
+
+ alpha = (alpha_raw << 10) / scale_B;
+ beta = (beta_raw << 10) / scale_B;
+ order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B + 5;
+ order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B + 5;
+ order1_5x = order_1 / 5;
+ order2_3x = order_2 / 3;
+ order1_5x_rem = order_1 - 5 * order1_5x;
+ order2_3x_rem = order_2 - 3 * order2_3x;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ int PA_angle;
+
+ /* pa_table[4] is calculated from PA_angle for i=5 */
+ if (i == 4)
+ continue;
+
+ tmp = i * 32;
+ if (beta > 0)
+ y5 = (((beta * tmp - 64) >> 6) -
+ (1 << order1_5x)) / (1 << order1_5x);
+ else
+ y5 = ((((beta * tmp - 64) >> 6) +
+ (1 << order1_5x)) / (1 << order1_5x));
+
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = y5 / (1 << order1_5x_rem);
+
+ if (beta > 0)
+ y3 = (alpha * tmp -
+ (1 << order2_3x)) / (1 << order2_3x);
+ else
+ y3 = (alpha * tmp +
+ (1 << order2_3x)) / (1 << order2_3x);
+ y3 = (y3 * tmp) / (1 << order2_3x);
+ y3 = (y3 * tmp) / (1 << order2_3x);
+ y3 = y3 / (1 << order2_3x_rem);
+
+ if (i < 4) {
+ PA_angle = 0;
+ } else {
+ PA_angle = y5 + y3;
+ if (PA_angle < -150)
+ PA_angle = -150;
+ else if (PA_angle > 150)
+ PA_angle = 150;
+ }
+
+ pa_table[i] = ((PA_in[i] & 0x7ff) << 11) + (PA_angle & 0x7ff);
+ if (i == 5) {
+ PA_angle = (PA_angle + 2) >> 1;
+ pa_table[i - 1] = ((PA_in[i - 1] & 0x7ff) << 11) +
+ (PA_angle & 0x7ff);
+ }
+ }
+
+ *gain = G_fxp;
+ return true;
+}
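Editor's note: the accumulator extraction at the top of create_pa_curve() amounts to unpacking four fields per training bin from the two 48-word channel-info snapshots. A self-contained sketch of that layout; the snapshot words in main() are made up purely to exercise the unpacking:

#include <stdio.h>

struct paprd_bin {
	int cnt;	/* number of samples in the bin */
	int tx;		/* sum of tx amplitudes */
	int rx;		/* sum of rx distances to the lower bin edge */
	int ang;	/* sum of angles */
};

/* Unpack training bin i (0..22) from the lower/upper 48-word snapshots. */
static struct paprd_bin unpack_bin(const unsigned int *lo,
				   const unsigned int *up, int i)
{
	struct paprd_bin b;

	b.cnt = lo[i] & 0xffff;
	b.tx  = ((lo[i] >> 16) & 0xffff) | ((up[i] & 0x7ff) << 16);
	b.rx  = ((up[i] >> 11) & 0x1f) | ((lo[i + 23] & 0xffff) << 5);
	b.ang = ((lo[i + 23] >> 16) & 0xffff) | ((up[i + 23] & 0x7ff) << 16);

	return b;
}

int main(void)
{
	/* made-up snapshot words */
	unsigned int lo[48] = { [0] = 0x00200040, [23] = 0x01000300 };
	unsigned int up[48] = { [0] = 0x00001801, [23] = 0x00000002 };
	struct paprd_bin b = unpack_bin(lo, up, 0);

	printf("cnt=%d tx=%d rx=%d ang=%d\n", b.cnt, b.tx, b.rx, b.ang);
	return 0;
}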
+
+void ar9003_paprd_populate_single_table(struct ath_hw *ah,
+ struct ath9k_channel *chan, int chain)
+{
+ u32 *paprd_table_val = chan->pa_table[chain];
+ u32 small_signal_gain = chan->small_signal_gain[chain];
+ u32 training_power;
+ u32 reg = 0;
+ int i;
+
+ training_power =
+ REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+ AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+ training_power -= 4;
+
+ if (chain == 0)
+ reg = AR_PHY_PAPRD_MEM_TAB_B0;
+ else if (chain == 1)
+ reg = AR_PHY_PAPRD_MEM_TAB_B1;
+ else if (chain == 2)
+ reg = AR_PHY_PAPRD_MEM_TAB_B2;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ REG_WRITE(ah, reg, paprd_table_val[i]);
+ reg = reg + 4;
+ }
+
+ if (chain == 0)
+ reg = AR_PHY_PA_GAIN123_B0;
+ else if (chain == 1)
+ reg = AR_PHY_PA_GAIN123_B1;
+ else
+ reg = AR_PHY_PA_GAIN123_B2;
+
+ REG_RMW_FIELD(ah, reg, AR_PHY_PA_GAIN123_PA_GAIN1, small_signal_gain);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B0,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+}
+EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
+
+int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
+{
+
+ unsigned int i, desired_gain, gain_index;
+ unsigned int train_power;
+
+ train_power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+ AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+
+ train_power = train_power - 4;
+
+ desired_gain = ar9003_get_desired_gain(ah, chain, train_power);
+
+ gain_index = 0;
+ for (i = 0; i < 32; i++) {
+ if (ah->paprd_gain_table_index[i] >= desired_gain)
+ break;
+ gain_index++;
+ }
+
+ ar9003_tx_force_gain(ah, gain_index);
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+ return 0;
+}
+EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
+
+int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
+ int chain)
+{
+ u16 *small_signal_gain = &chan->small_signal_gain[chain];
+ u32 *pa_table = chan->pa_table[chain];
+ u32 *data_L, *data_U;
+ int i, status = 0;
+ u32 *buf;
+ u32 reg;
+
+ memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain]));
+
+ buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ data_L = &buf[0];
+ data_U = &buf[48];
+
+ REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
+
+ reg = AR_PHY_CHAN_INFO_TAB_0;
+ for (i = 0; i < 48; i++)
+ data_L[i] = REG_READ(ah, reg + (i << 2));
+
+ REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
+
+ for (i = 0; i < 48; i++)
+ data_U[i] = REG_READ(ah, reg + (i << 2));
+
+ if (!create_pa_curve(data_L, data_U, pa_table, small_signal_gain))
+ status = -2;
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+ kfree(buf);
+
+ return status;
+}
+EXPORT_SYMBOL(ar9003_paprd_create_curve);
+
+int ar9003_paprd_init_table(struct ath_hw *ah)
+{
+ ar9003_paprd_setup_single_table(ah);
+ ar9003_paprd_get_gain_table(ah);
+ return 0;
+}
+EXPORT_SYMBOL(ar9003_paprd_init_table);
+
+bool ar9003_paprd_is_done(struct ath_hw *ah)
+{
+ return !!REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+}
+EXPORT_SYMBOL(ar9003_paprd_is_done);
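Editor's note: taken together, the helpers exported by this new file suggest a per-chain calibration flow along the lines sketched below. This is only an illustration of the intended ordering, with print-only stand-ins for the real functions (which take ah/chan/chain arguments); the actual caller is added to the ath9k main/xmit paths elsewhere in this series.

#include <stdio.h>
#include <stdbool.h>

/* Print-only stand-ins for the exported ar9003_paprd_* helpers above. */
static int  paprd_init_table(void)            { puts("init single table + gain table"); return 0; }
static int  paprd_setup_gain_table(int chain) { printf("force tx gain, chain %d\n", chain); return 0; }
static bool paprd_is_done(void)               { return true; /* poll AR_PHY_PAPRD_TRAINER_STAT1 */ }
static int  paprd_create_curve(int chain)     { printf("build PA curve, chain %d\n", chain); return 0; }
static void paprd_populate_single_table(int chain) { printf("write PA table, chain %d\n", chain); }
static void paprd_enable(bool on)             { printf("PAPRD %s\n", on ? "on" : "off"); }

int main(void)
{
	int chain;

	paprd_init_table();
	for (chain = 0; chain < 3; chain++) {
		paprd_setup_gain_table(chain);
		/* ... send a training frame, then wait for the trainer ... */
		if (!paprd_is_done())
			return 1;
		if (paprd_create_curve(chain))
			return 1;
		paprd_populate_single_table(chain);
	}
	paprd_enable(true);
	return 0;
}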
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index c714579..19bc05c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -17,6 +17,28 @@
#include "hw.h"
#include "ar9003_phy.h"
+static const int firstep_table[] =
+/* level: 0 1 2 3 4 5 6 7 8 */
+ { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
+
+static const int cycpwrThr1_table[] =
+/* level: 0 1 2 3 4 5 6 7 8 */
+ { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
+
+/*
+ * register values to turn OFDM weak signal detection OFF
+ */
+static const int m1ThreshLow_off = 127;
+static const int m2ThreshLow_off = 127;
+static const int m1Thresh_off = 127;
+static const int m2Thresh_off = 127;
+static const int m2CountThr_off = 31;
+static const int m2CountThrLow_off = 63;
+static const int m1ThreshLowExt_off = 127;
+static const int m2ThreshLowExt_off = 127;
+static const int m1ThreshExt_off = 127;
+static const int m2ThreshExt_off = 127;
+
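Editor's note: these tables drive the new "relative to the INI default" ANI adjustments used further down in ar9003_hw_ani_control(): the requested level is mapped through the table, offset by the cached INI value, then clamped. A compilable sketch of that pattern; the clamp limits here are placeholders for ATH9K_SIG_FIRSTEP_SETTING_MIN/MAX, and the INI default in main() is made up:

#include <stdio.h>

/* copied from the table above: firstep levels 0-8, default level 2 */
static const int firstep_table[] = { -4, -2, 0, 2, 4, 6, 8, 10, 12 };
#define DEFAULT_FIRSTEP_LVL 2

/* placeholder clamp limits */
#define SETTING_MIN 0
#define SETTING_MAX 0x3f

/* New-style ANI: offset the INI default by the table delta, then clamp. */
static int firstep_value(int level, int ini_default)
{
	int value = firstep_table[level] - firstep_table[DEFAULT_FIRSTEP_LVL] +
		    ini_default;

	if (value < SETTING_MIN)
		value = SETTING_MIN;
	if (value > SETTING_MAX)
		value = SETTING_MAX;
	return value;
}

int main(void)
{
	/* with an INI default of 6, level 0 maps to 6 - 4 = 2, level 8 to 6 + 12 = 18 */
	printf("%d %d\n", firstep_value(0, 6), firstep_value(8, 6));
	return 0;
}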
/**
* ar9003_hw_set_channel - set channel on single-chip device
* @ah: atheros hardware structure
@@ -94,7 +116,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
}
/**
- * ar9003_hw_spur_mitigate - convert baseband spur frequency
+ * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
* @ah: atheros hardware structure
* @chan:
*
@@ -521,15 +543,6 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
u32 val = INI_RA(iniArr, i, column);
REG_WRITE(ah, reg, val);
-
- /*
- * Determine if this is a shift register value, and insert the
- * configured delay if so.
- */
- if (reg >= 0x16000 && reg < 0x17000
- && ah->config.analog_shiftreg)
- udelay(100);
-
DO_DELAY(regWrites);
}
}
@@ -732,71 +745,68 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
{
struct ar5416AniState *aniState = ah->curani;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ s32 value, value2;
switch (cmd & ah->ani_function) {
- case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
- u32 level = param;
-
- if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
- return false;
- }
-
- REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
- AR_PHY_DESIRED_SZ_TOT_DES,
- ah->totalSizeDesired[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC,
- AR_PHY_AGC_COARSE_LOW,
- ah->coarse_low[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC,
- AR_PHY_AGC_COARSE_HIGH,
- ah->coarse_high[level]);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);
-
- if (level > aniState->noiseImmunityLevel)
- ah->stats.ast_ani_niup++;
- else if (level < aniState->noiseImmunityLevel)
- ah->stats.ast_ani_nidown++;
- aniState->noiseImmunityLevel = level;
- break;
- }
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- const int m1ThreshLow[] = { 127, 50 };
- const int m2ThreshLow[] = { 127, 40 };
- const int m1Thresh[] = { 127, 0x4d };
- const int m2Thresh[] = { 127, 0x40 };
- const int m2CountThr[] = { 31, 16 };
- const int m2CountThrLow[] = { 63, 48 };
+ /*
+ * on == 1 means OFDM weak signal detection is ON;
+ * this is the default, for less noise immunity
+ *
+ * on == 0 means OFDM weak signal detection is OFF,
+ * for more noise immunity
+ */
u32 on = param ? 1 : 0;
+ /*
+ * make the register settings for the default case
+ * (weak signal detect ON) come from the INI file
+ */
+ int m1ThreshLow = on ?
+ aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+ int m2ThreshLow = on ?
+ aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+ int m1Thresh = on ?
+ aniState->iniDef.m1Thresh : m1Thresh_off;
+ int m2Thresh = on ?
+ aniState->iniDef.m2Thresh : m2Thresh_off;
+ int m2CountThr = on ?
+ aniState->iniDef.m2CountThr : m2CountThr_off;
+ int m2CountThrLow = on ?
+ aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+ int m1ThreshLowExt = on ?
+ aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+ int m2ThreshLowExt = on ?
+ aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+ int m1ThreshExt = on ?
+ aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+ int m2ThreshExt = on ?
+ aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
- m1ThreshLow[on]);
+ m1ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
- m2ThreshLow[on]);
+ m2ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
- m2CountThrLow[on]);
+ m2CountThrLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
@@ -806,6 +816,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: ofdm weak signal: %s=>%s\n",
+ chan->channel,
+ !aniState->ofdmWeakSigDetectOff ?
+ "on" : "off",
+ on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
@@ -814,64 +830,167 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
}
break;
}
- case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- const int weakSigThrCck[] = { 8, 6 };
- u32 high = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
- weakSigThrCck[high]);
- if (high != aniState->cckWeakSigThreshold) {
- if (high)
- ah->stats.ast_ani_cckhigh++;
- else
- ah->stats.ast_ani_ccklow++;
- aniState->cckWeakSigThreshold = high;
- }
- break;
- }
case ATH9K_ANI_FIRSTEP_LEVEL:{
- const int firstep[] = { 0, 4, 8 };
u32 level = param;
- if (level >= ARRAY_SIZE(firstep)) {
+ if (level >= ARRAY_SIZE(firstep_table)) {
ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
+ "ATH9K_ANI_FIRSTEP_LEVEL: level "
+ "out of range (%u > %u)\n",
level,
- (unsigned) ARRAY_SIZE(firstep));
+ (unsigned) ARRAY_SIZE(firstep_table));
return false;
}
+
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ aniState->iniDef.firstep;
+ if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP,
- firstep[level]);
- if (level > aniState->firstepLevel)
- ah->stats.ast_ani_stepup++;
- else if (level < aniState->firstepLevel)
- ah->stats.ast_ani_stepdown++;
- aniState->firstepLevel = level;
+ value);
+ /*
+ * we need to set first step low register too
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ aniState->iniDef.firstepLow;
+ if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
+
+ if (level != aniState->firstepLevel) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "firstep[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value,
+ aniState->iniDef.firstep);
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "firstep_low[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value2,
+ aniState->iniDef.firstepLow);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ }
break;
}
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
u32 level = param;
- if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
+ "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
+ "out of range (%u > %u)\n",
level,
- (unsigned) ARRAY_SIZE(cycpwrThr1));
+ (unsigned) ARRAY_SIZE(cycpwrThr1_table));
return false;
}
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ aniState->iniDef.cycpwrThr1;
+ if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1,
- cycpwrThr1[level]);
- if (level > aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurup++;
- else if (level < aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurdown++;
- aniState->spurImmunityLevel = level;
+ value);
+
+ /*
+ * set AR_PHY_EXT_CCA for extension channel
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ aniState->iniDef.cycpwrThr1Ext;
+ if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CYCPWR_THR1, value2);
+
+ if (level != aniState->spurImmunityLevel) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "cycpwrThr1[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value,
+ aniState->iniDef.cycpwrThr1);
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "cycpwrThr1Ext[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value2,
+ aniState->iniDef.cycpwrThr1Ext);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ }
break;
}
+ case ATH9K_ANI_MRC_CCK:{
+ /*
+ * is_on == 1 means MRC CCK is ON (default, less noise immunity)
+ * is_on == 0 means MRC CCK is OFF (more noise immunity)
+ */
+ bool is_on = param ? 1 : 0;
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+ AR_PHY_MRC_CCK_ENABLE, is_on);
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+ AR_PHY_MRC_CCK_MUX_REG, is_on);
+ if (!is_on != aniState->mrcCCKOff) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: MRC CCK: %s=>%s\n",
+ chan->channel,
+ !aniState->mrcCCKOff ? "on" : "off",
+ is_on ? "on" : "off");
+ if (is_on)
+ ah->stats.ast_ani_ccklow++;
+ else
+ ah->stats.ast_ani_cckhigh++;
+ aniState->mrcCCKOff = !is_on;
+ }
+ break;
+ }
case ATH9K_ANI_PRESENT:
break;
default:
@@ -880,25 +999,19 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
return false;
}
- ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
ath_print(common, ATH_DBG_ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
- "ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
+ "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
+ "MRCcck=%s listenTime=%d CC=%d listen=%d "
+ "ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_print(common, ATH_DBG_ANI,
- "cckWeakSigThreshold=%d, "
- "firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
+ !aniState->ofdmWeakSigDetectOff ? "on" : "off",
aniState->firstepLevel,
- aniState->listenTime);
- ath_print(common, ATH_DBG_ANI,
- "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
- aniState->cycleCount,
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
-
+ !aniState->mrcCCKOff ? "on" : "off",
+ aniState->listenTime,
+ aniState->cycleCount,
+ aniState->listenTime,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
return true;
}
@@ -1111,6 +1224,70 @@ static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
}
+/*
+ * Initialize the ANI register values with default (ini) values.
+ * This routine is called during a (full) hardware reset after
+ * all the registers are initialised from the INI.
+ */
+static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ath9k_ani_default *iniDef;
+ int index;
+ u32 val;
+
+ index = ath9k_hw_get_ani_channel_idx(ah, chan);
+ aniState = &ah->ani[index];
+ ah->curani = aniState;
+ iniDef = &aniState->iniDef;
+
+ ath_print(common, ATH_DBG_ANI,
+ "ver %d.%d opmode %u chan %d MHz/0x%x\n",
+ ah->hw_version.macVersion,
+ ah->hw_version.macRev,
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags);
+
+ val = REG_READ(ah, AR_PHY_SFCORR);
+ iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
+ iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
+ iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_LOW);
+ iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
+ iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
+ iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_EXT);
+ iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
+ iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
+ iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
+ iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
+ iniDef->firstep = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP);
+ iniDef->firstepLow = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW);
+ iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
+ AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1);
+ iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CYCPWR_THR1);
+
+ /* these levels just got reset to defaults by the INI */
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+ aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
+
+ aniState->cycleCount = 0;
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1131,6 +1308,7 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->ani_control = ar9003_hw_ani_control;
priv_ops->do_getnf = ar9003_hw_do_getnf;
priv_ops->loadnf = ar9003_hw_loadnf;
+ priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
}
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 676d3f1..3394dfe 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -149,6 +149,8 @@
#define AR_PHY_EXT_CCA_THRESH62_S 16
#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_EXT_MINCCA_PWR_S 16
+#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L
+#define AR_PHY_EXT_CYCPWR_THR1_S 9
#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
@@ -283,6 +285,12 @@
#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
+#define AR_PHY_MRC_CCK_CTRL (AR_AGC_BASE + 0x1d0)
+#define AR_PHY_MRC_CCK_ENABLE 0x00000001
+#define AR_PHY_MRC_CCK_ENABLE_S 0
+#define AR_PHY_MRC_CCK_MUX_REG 0x00000002
+#define AR_PHY_MRC_CCK_MUX_REG_S 1
+
#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
@@ -451,7 +459,11 @@
#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
-#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+
+#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
+#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
+
#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
@@ -467,17 +479,63 @@
#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
-#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
-#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
-#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
-#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
-#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
-#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+#define AR_PHY_TPC_1 (AR_SM_BASE + 0x1f8)
+#define AR_PHY_TPC_1_FORCED_DAC_GAIN 0x0000003e
+#define AR_PHY_TPC_1_FORCED_DAC_GAIN_S 1
+#define AR_PHY_TPC_1_FORCE_DAC_GAIN 0x00000001
+#define AR_PHY_TPC_1_FORCE_DAC_GAIN_S 0
+
+#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
+#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
+#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
+
+#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
+#define AR_PHY_TPC_11_OLPC_GAIN_DELTA 0x00ff0000
+#define AR_PHY_TPC_11_OLPC_GAIN_DELTA_S 16
+
+#define AR_PHY_TPC_12 (AR_SM_BASE + 0x224)
+#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5 0x3e000000
+#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5_S 25
+
+#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
+#define AR_PHY_TPC_18_THERM_CAL_VALUE 0x000000ff
+#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
+#define AR_PHY_TPC_18_VOLT_CAL_VALUE 0x0000ff00
+#define AR_PHY_TPC_18_VOLT_CAL_VALUE_S 8
+
+#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+#define AR_PHY_TPC_19_ALPHA_VOLT 0x001f0000
+#define AR_PHY_TPC_19_ALPHA_VOLT_S 16
+#define AR_PHY_TPC_19_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_ALPHA_THERM_S 0
+
+#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
+#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN 0x00000001
+#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN_S 0
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN 0x0000000e
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_S 1
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN 0x00000030
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_S 4
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN 0x000003c0
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN_S 6
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA 0x00003c00
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA_S 10
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB 0x0003c000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB_S 14
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC 0x003c0000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC_S 18
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND 0x00c00000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND_S 22
+#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL 0x01000000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL_S 24
-#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
+#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
+
#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
@@ -490,7 +548,17 @@
#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
-#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+
+#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
+
+#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254)
+#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff
+#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0
+#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00
+#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8
+
#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
@@ -660,17 +728,9 @@
#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
-#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
-#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
-#define AR_PHY_TPC_19_ALPHA_THERM 0xff
-#define AR_PHY_TPC_19_ALPHA_THERM_S 0
-
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
-#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
-#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
-
/*
* Channel 1 Register Map
*/
@@ -842,6 +902,144 @@
#define AR_PHY_WATCHDOG_STATUS_CLR 0x00000008
+/*
+ * PAPRD registers
+ */
+#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
+
+#define AR_PHY_PAPRD_AM2AM (AR_CHAN_BASE + 0xe4)
+#define AR_PHY_PAPRD_AM2AM_MASK 0x01ffffff
+#define AR_PHY_PAPRD_AM2AM_MASK_S 0
+
+#define AR_PHY_PAPRD_AM2PM (AR_CHAN_BASE + 0xe8)
+#define AR_PHY_PAPRD_AM2PM_MASK 0x01ffffff
+#define AR_PHY_PAPRD_AM2PM_MASK_S 0
+
+#define AR_PHY_PAPRD_HT40 (AR_CHAN_BASE + 0xec)
+#define AR_PHY_PAPRD_HT40_MASK 0x01ffffff
+#define AR_PHY_PAPRD_HT40_MASK_S 0
+
+#define AR_PHY_PAPRD_CTRL0_B0 (AR_CHAN_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_B1 (AR_CHAN1_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_B2 (AR_CHAN2_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE 0x00000001
+#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE_S 0
+#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK 0x00000002
+#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK_S 1
+#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH 0xf8000000
+#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH_S 27
+
+#define AR_PHY_PAPRD_CTRL1_B0 (AR_CHAN_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_B1 (AR_CHAN1_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_B2 (AR_CHAN2_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA 0x00000001
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA_S 0
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE 0x00000002
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE_S 1
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE 0x00000004
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE_S 2
+#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL 0x000001f8
+#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_S 3
+#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK 0x0001fe00
+#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK_S 9
+#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
+#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
+
+#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + 0x490)
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_S 1
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE 0x00000100
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_S 8
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE 0x00000200
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_S 9
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE 0x00000400
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_S 10
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE 0x00000800
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_S 11
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
+
+#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + 0x494)
+#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
+#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
+
+#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + 0x498)
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_S 6
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL 0x0001f000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_S 12
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES 0x000e0000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_S 17
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN 0x00f00000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_S 20
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN 0x0f000000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_S 24
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
+
+#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + 0x49c)
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_S 12
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR 0x00000fff
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_S 0
+
+#define AR_PHY_PAPRD_PRE_POST_SCALE_0_B0 (AR_CHAN_BASE + 0x100)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_1_B0 (AR_CHAN_BASE + 0x104)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_2_B0 (AR_CHAN_BASE + 0x108)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_3_B0 (AR_CHAN_BASE + 0x10c)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_4_B0 (AR_CHAN_BASE + 0x110)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_5_B0 (AR_CHAN_BASE + 0x114)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_6_B0 (AR_CHAN_BASE + 0x118)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_7_B0 (AR_CHAN_BASE + 0x11c)
+#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF
+#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0
+
+#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + 0x4a0)
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE 0x00000002
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_S 1
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR 0x00000004
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_S 2
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE 0x00000008
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_S 3
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX 0x000001f0
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_S 4
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9
+
+#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + 0x4a4)
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX 0x001f0000
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_S 16
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21
+
+#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + 0x4a8)
+#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff
+#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0
+
+#define AR_PHY_PAPRD_MEM_TAB_B0 (AR_CHAN_BASE + 0x120)
+#define AR_PHY_PAPRD_MEM_TAB_B1 (AR_CHAN1_BASE + 0x120)
+#define AR_PHY_PAPRD_MEM_TAB_B2 (AR_CHAN2_BASE + 0x120)
+
+#define AR_PHY_PA_GAIN123_B0 (AR_CHAN_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_B1 (AR_CHAN1_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_B2 (AR_CHAN2_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_PA_GAIN1 0x3FF
+#define AR_PHY_PA_GAIN123_PA_GAIN1_S 0
+
+#define AR_PHY_POWERTX_RATE5 (AR_SM_BASE + 0x1d0)
+#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0 0x3F
+#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S 0
+
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 82aca4b..72d5e52 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -20,6 +20,7 @@
#include <linux/etherdevice.h>
#include <linux/device.h>
#include <linux/leds.h>
+#include <linux/completion.h>
#include "debug.h"
#include "common.h"
@@ -194,6 +195,7 @@ enum ATH_AGGR_STATUS {
#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
+ int axq_class;
u32 axq_qnum;
u32 *axq_link;
struct list_head axq_q;
@@ -206,7 +208,6 @@ struct ath_txq {
struct list_head txq_fifo_pending;
u8 txq_headidx;
u8 txq_tailidx;
- int pending_frames;
};
struct ath_atx_ac {
@@ -224,6 +225,8 @@ struct ath_buf_state {
int bfs_tidno;
int bfs_retries;
u8 bf_type;
+ u8 bfs_paprd;
+ unsigned long bfs_paprd_timestamp;
u32 bfs_keyix;
enum ath9k_key_type bfs_keytype;
};
@@ -244,7 +247,6 @@ struct ath_buf {
struct ath_buf_state bf_state;
dma_addr_t bf_dmacontext;
struct ath_wiphy *aphy;
- struct ath_txq *txq;
};
struct ath_atx_tid {
@@ -281,6 +283,7 @@ struct ath_tx_control {
struct ath_txq *txq;
int if_id;
enum ath9k_internal_frame_type frame_type;
+ u8 paprd;
};
#define ATH_TX_ERROR 0x01
@@ -290,11 +293,12 @@ struct ath_tx_control {
struct ath_tx {
u16 seq_no;
u32 txqsetup;
- int hwq_map[ATH9K_WME_AC_VO+1];
+ int hwq_map[WME_NUM_AC];
spinlock_t txbuflock;
struct list_head txbuf;
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
+ int pending_frames[WME_NUM_AC];
};
struct ath_rx_edma {
@@ -417,10 +421,14 @@ int ath_beaconq_config(struct ath_softc *sc);
#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
-#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
+#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */
+#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+#define ATH_PAPRD_TIMEOUT 100 /* msecs */
+
+void ath_paprd_calibrate(struct work_struct *work);
void ath_ani_calibrate(unsigned long data);
/**********/
@@ -511,6 +519,7 @@ void ath_deinit_leds(struct ath_softc *sc);
#define SC_OP_TSF_RESET BIT(11)
#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
#define SC_OP_BT_SCAN BIT(13)
+#define SC_OP_ANI_RUN BIT(14)
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
@@ -552,6 +561,8 @@ struct ath_softc {
spinlock_t sc_serial_rw;
spinlock_t sc_pm_lock;
struct mutex mutex;
+ struct work_struct paprd_work;
+ struct completion paprd_complete;
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
@@ -610,7 +621,6 @@ struct ath_wiphy {
void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
-int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
int ath_cabq_update(struct ath_softc *);
@@ -621,13 +631,12 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
extern struct ieee80211_ops ath9k_ops;
extern int modparam_nohwcrypt;
+extern int led_blink;
irqreturn_t ath_isr(int irq, void *dev);
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
-const char *ath_mac_bb_name(u32 mac_bb_version);
-const char *ath_rf_name(u16 rf_version);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *ichan);
@@ -678,8 +687,6 @@ void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
-
void ath_start_rfkill_poll(struct ath_softc *sc);
extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index f43d85a..4d4b22d 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -38,8 +38,7 @@ int ath_beaconq_config(struct ath_softc *sc)
qi.tqi_cwmax = 0;
} else {
/* Adhoc mode; important thing is to use 2x cwmin. */
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA,
- ATH9K_WME_AC_BE);
+ qnum = sc->tx.hwq_map[WME_AC_BE];
ath9k_hw_get_txq_props(ah, qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index a127bdb..54aae93 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -630,10 +630,10 @@ static const struct file_operations fops_wiphy = {
do { \
len += snprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BE]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BK]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VI]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VO]].elem); \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \
} while(0)
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -956,6 +956,10 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, sc, &fops_regval))
goto err;
+ if (!debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
+ goto err;
+
sc->debug.regidx = 0;
return 0;
err:
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 7da7d73..bdd8aa0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -263,7 +263,8 @@ enum eeprom_param {
EEP_PWR_TABLE_OFFSET,
EEP_DRIVE_STRENGTH,
EEP_INTERNAL_REGULATOR,
- EEP_SWREG
+ EEP_SWREG,
+ EEP_PAPRD,
};
enum ar5416_rates {
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 0ee75e7..3a8ee99 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -76,7 +76,8 @@ static void ath_led_brightness(struct led_classdev *led_cdev,
case LED_FULL:
if (led->led_type == ATH_LED_ASSOC) {
sc->sc_flags |= SC_OP_LED_ASSOCIATED;
- ieee80211_queue_delayed_work(sc->hw,
+ if (led_blink)
+ ieee80211_queue_delayed_work(sc->hw,
&sc->ath_led_blink_work, 0);
} else if (led->led_type == ATH_LED_RADIO) {
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
@@ -143,7 +144,8 @@ void ath_init_leds(struct ath_softc *sc)
/* LED off, active low */
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
- INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
+ if (led_blink)
+ INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
trigger = ieee80211_get_radio_led_name(sc->hw);
snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
@@ -180,7 +182,8 @@ void ath_init_leds(struct ath_softc *sc)
return;
fail:
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
+ if (led_blink)
+ cancel_delayed_work_sync(&sc->ath_led_blink_work);
ath_deinit_leds(sc);
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5f3ea70..ad9134b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -16,10 +16,27 @@
#include "htc.h"
+/* identify firmware images */
+#define FIRMWARE_AR7010 "ar7010.fw"
+#define FIRMWARE_AR7010_1_1 "ar7010_1_1.fw"
+#define FIRMWARE_AR9271 "ar9271.fw"
+
+MODULE_FIRMWARE(FIRMWARE_AR7010);
+MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
+MODULE_FIRMWARE(FIRMWARE_AR9271);
+
static struct usb_device_id ath9k_hif_usb_ids[] = {
- { USB_DEVICE(0x0cf3, 0x9271) },
- { USB_DEVICE(0x0cf3, 0x1006) },
- { USB_DEVICE(0x0cf3, 0x7010) },
+ { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
+ { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
+ { USB_DEVICE(0x0cf3, 0x7010) }, /* Atheros */
+ { USB_DEVICE(0x0cf3, 0x7015) }, /* Atheros */
+ { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
+ { USB_DEVICE(0x0846, 0x9018) }, /* Netgear WNDA3200 */
+ { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
+ { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
+ { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
+ { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
+ { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
{ },
};
@@ -879,17 +896,15 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
/* Find out which firmware to load */
switch(hif_dev->device_id) {
- case 0x9271:
- case 0x1006:
- hif_dev->fw_name = "ar9271.fw";
- break;
case 0x7010:
+ case 0x9018:
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
- hif_dev->fw_name = "ar7010_1_1.fw";
+ hif_dev->fw_name = FIRMWARE_AR7010_1_1;
else
- hif_dev->fw_name = "ar7010.fw";
+ hif_dev->fw_name = FIRMWARE_AR7010;
break;
default:
+ hif_dev->fw_name = FIRMWARE_AR9271;
break;
}
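The ID table gains several vendor-branded dongles, MODULE_FIRMWARE() advertises the three firmware images to userspace tooling, and the probe-time switch now falls through to the AR9271 image by default, so only the 0x7010/0x9018 devices need an explicit case, with bcdDevice choosing between the two AR7010 images. The selection logic, condensed into a sketch (the helper name is illustrative):

static const char *my_pick_fw(u16 devid, u16 bcd_device)
{
	switch (devid) {
	case 0x7010:
	case 0x9018:
		return (bcd_device == 0x0202) ? FIRMWARE_AR7010_1_1
					      : FIRMWARE_AR7010;
	default:
		/* all remaining IDs load the AR9271 image */
		return FIRMWARE_AR9271;
	}
}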
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 051b8d8..3756400 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -223,15 +223,6 @@ struct ath9k_htc_sta {
enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
};
-struct ath9k_htc_aggr_work {
- u16 tid;
- u8 sta_addr[ETH_ALEN];
- struct ieee80211_hw *hw;
- struct ieee80211_vif *vif;
- enum ieee80211_ampdu_mlme_action action;
- struct mutex mutex;
-};
-
#define ATH9K_HTC_RXBUF 256
#define HTC_RX_FRAME_HEADER_SIZE 40
@@ -296,6 +287,7 @@ struct ath9k_debug {
#define ATH_LED_PIN_DEF 1
#define ATH_LED_PIN_9287 8
#define ATH_LED_PIN_9271 15
+#define ATH_LED_PIN_7010 12
#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
@@ -331,11 +323,10 @@ struct htc_beacon_config {
#define OP_LED_ON BIT(4)
#define OP_PREAMBLE_SHORT BIT(5)
#define OP_PROTECT_ENABLE BIT(6)
-#define OP_TXAGGR BIT(7)
-#define OP_ASSOCIATED BIT(8)
-#define OP_ENABLE_BEACON BIT(9)
-#define OP_LED_DEINIT BIT(10)
-#define OP_UNPLUGGED BIT(11)
+#define OP_ASSOCIATED BIT(7)
+#define OP_ENABLE_BEACON BIT(8)
+#define OP_LED_DEINIT BIT(9)
+#define OP_UNPLUGGED BIT(10)
struct ath9k_htc_priv {
struct device *dev;
@@ -376,8 +367,6 @@ struct ath9k_htc_priv {
struct ath9k_htc_rx rx;
struct tasklet_struct tx_tasklet;
struct sk_buff_head tx_queue;
- struct ath9k_htc_aggr_work aggr_work;
- struct delayed_work ath9k_aggr_work;
struct delayed_work ath9k_ani_work;
struct work_struct ps_work;
@@ -398,7 +387,7 @@ struct ath9k_htc_priv {
int beaconq;
int cabq;
- int hwq_map[ATH9K_WME_AC_VO+1];
+ int hwq_map[WME_NUM_AC];
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
struct ath9k_debug debug;
@@ -431,8 +420,7 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv);
void ath9k_tx_tasklet(unsigned long data);
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
-bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
- enum ath9k_tx_queue_subtype qtype);
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv);
int get_hw_qnum(u16 queue, int *hwq_map);
int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 12a3bb0..bd1506e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -227,7 +227,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
{
struct ath_hw *ah = priv->ah;
struct ath9k_tx_queue_info qi, qi_be;
- int qnum = priv->hwq_map[ATH9K_WME_AC_BE];
+ int qnum = priv->hwq_map[WME_AC_BE];
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 7339439..148b433 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -244,17 +244,12 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
*/
switch(devid) {
- case 0x9271:
- case 0x1006:
- priv->htc->credits = 33;
- break;
case 0x7010:
+ case 0x9018:
priv->htc->credits = 45;
break;
default:
- dev_err(priv->dev, "ath9k_htc: Unsupported device id: 0x%x\n",
- devid);
- goto err;
+ priv->htc->credits = 33;
}
ret = htc_init(priv->htc);
@@ -521,23 +516,23 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
goto err;
}
- if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
+ if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BE traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
+ if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BK traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
+ if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VI traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
+ if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VO traffic\n");
goto err;
@@ -569,36 +564,6 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
*/
for (i = 0; i < common->keymax; i++)
ath9k_hw_keyreset(priv->ah, (u16) i);
-
- if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
- }
-
- /*
- * Check whether the separate key cache entries
- * are required to handle both tx+rx MIC keys.
- * With split mic keys the number of stations is limited
- * to 27 otherwise 59.
- */
- if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
- common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
- 1, 1, NULL);
}
static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
@@ -636,7 +601,6 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
- priv->op_flags |= OP_TXAGGR;
priv->ah->opmode = NL80211_IFTYPE_STATION;
}
@@ -668,14 +632,12 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
spin_lock_init(&priv->beacon_lock);
spin_lock_init(&priv->tx_lock);
mutex_init(&priv->mutex);
- mutex_init(&priv->aggr_work.mutex);
mutex_init(&priv->htc_pm_lock);
tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
(unsigned long)priv);
tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
(unsigned long)priv);
tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
- INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
INIT_WORK(&priv->ps_work, ath9k_ps_work);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 7aefbc6..e38ca66 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -27,13 +27,11 @@ static struct dentry *ath9k_debugfs_root;
static void ath_update_txpow(struct ath9k_htc_priv *priv)
{
struct ath_hw *ah = priv->ah;
- u32 txpow;
if (priv->curtxpow != priv->txpowlimit) {
ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
/* read back in case value is clamped */
- ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
- priv->curtxpow = txpow;
+ priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
}
}
@@ -364,11 +362,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
trate->rates.ht_rates.rs_nrates = j;
caps = WLAN_RC_HT_FLAG;
- if (priv->ah->caps.tx_chainmask != 1 &&
- ath9k_hw_getcapability(priv->ah, ATH9K_CAP_DS, 0, NULL)) {
- if (sta->ht_cap.mcs.rx_mask[1])
- caps |= WLAN_RC_DS_FLAG;
- }
+ if (sta->ht_cap.mcs.rx_mask[1])
+ caps |= WLAN_RC_DS_FLAG;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
caps |= WLAN_RC_40_FLAG;
if (conf_is_ht40(&priv->hw->conf) &&
@@ -443,13 +438,13 @@ static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
bss_conf->bssid, be32_to_cpu(trate.capflags));
}
-static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
- struct ieee80211_vif *vif,
- u8 *sta_addr, u8 tid, bool oper)
+int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_ampdu_mlme_action action, u16 tid)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_target_aggr aggr;
- struct ieee80211_sta *sta = NULL;
struct ath9k_htc_sta *ista;
int ret = 0;
u8 cmd_rsp;
@@ -458,72 +453,28 @@ static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
return -EINVAL;
memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
-
- rcu_read_lock();
-
- /* Check if we are able to retrieve the station */
- sta = ieee80211_find_sta(vif, sta_addr);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
ista = (struct ath9k_htc_sta *) sta->drv_priv;
- if (oper)
- ista->tid_state[tid] = AGGR_START;
- else
- ista->tid_state[tid] = AGGR_STOP;
-
aggr.sta_index = ista->index;
-
- rcu_read_unlock();
-
- aggr.tidno = tid;
- aggr.aggr_enable = oper;
+ aggr.tidno = tid & 0xf;
+ aggr.aggr_enable = (action == IEEE80211_AMPDU_TX_START) ? true : false;
WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
if (ret)
ath_print(common, ATH_DBG_CONFIG,
"Unable to %s TX aggregation for (%pM, %d)\n",
- (oper) ? "start" : "stop", sta->addr, tid);
+ (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
else
ath_print(common, ATH_DBG_CONFIG,
- "%s aggregation for (%pM, %d)\n",
- (oper) ? "Starting" : "Stopping", sta->addr, tid);
-
- return ret;
-}
-
-void ath9k_htc_aggr_work(struct work_struct *work)
-{
- int ret = 0;
- struct ath9k_htc_priv *priv =
- container_of(work, struct ath9k_htc_priv,
- ath9k_aggr_work.work);
- struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
-
- mutex_lock(&wk->mutex);
+ "%s TX aggregation for (%pM, %d)\n",
+ (aggr.aggr_enable) ? "Starting" : "Stopping",
+ sta->addr, tid);
- switch (wk->action) {
- case IEEE80211_AMPDU_TX_START:
- ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
- wk->tid, true);
- if (!ret)
- ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
- wk->tid);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
- wk->tid, false);
- ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
- break;
- default:
- ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
- "Unknown AMPDU action\n");
- }
+ spin_lock_bh(&priv->tx_lock);
+ ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP;
+ spin_unlock_bh(&priv->tx_lock);
- mutex_unlock(&wk->mutex);
+ return ret;
}
/*********/
@@ -980,6 +931,8 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
priv->ah->led_pin = ATH_LED_PIN_9287;
else if (AR_SREV_9271(priv->ah))
priv->ah->led_pin = ATH_LED_PIN_9271;
+ else if (AR_DEVID_7010(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_7010;
else
priv->ah->led_pin = ATH_LED_PIN_DEF;
@@ -1271,7 +1224,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
/* Cancel all the running timers/work .. */
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_ani_work);
- cancel_delayed_work_sync(&priv->ath9k_aggr_work);
cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
ath9k_led_stop_brightness(priv);
@@ -1590,7 +1542,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
}
if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) &&
- (qnum == priv->hwq_map[ATH9K_WME_AC_BE]))
+ (qnum == priv->hwq_map[WME_AC_BE]))
ath9k_htc_beaconq_config(priv);
out:
ath9k_htc_ps_restore(priv);
@@ -1772,8 +1724,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
u16 tid, u16 *ssn)
{
struct ath9k_htc_priv *priv = hw->priv;
- struct ath9k_htc_aggr_work *work = &priv->aggr_work;
struct ath9k_htc_sta *ista;
+ int ret = 0;
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -1781,26 +1733,26 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
+ ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
+ if (!ret)
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
case IEEE80211_AMPDU_TX_STOP:
- if (!(priv->op_flags & OP_TXAGGR))
- return -ENOTSUPP;
- memcpy(work->sta_addr, sta->addr, ETH_ALEN);
- work->hw = hw;
- work->vif = vif;
- work->action = action;
- work->tid = tid;
- ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
+ ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ spin_lock_bh(&priv->tx_lock);
ista->tid_state[tid] = AGGR_OPERATIONAL;
+ spin_unlock_bh(&priv->tx_lock);
break;
default:
ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
"Unknown AMPDU action\n");
}
- return 0;
+ return ret;
}
static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
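The aggregation path no longer bounces through a delayed work: mac80211 already hands ampdu_action the station pointer, so the firmware command can be issued directly and the *_irqsafe completion callbacks used in place of the work item, its mutex and the OP_TXAGGR flag. A minimal sketch of a synchronous handler built on those callbacks (error handling trimmed, assuming the mac80211 API of this era):

static int my_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   enum ieee80211_ampdu_mlme_action action,
			   struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		/* program the target, then let mac80211 continue BA setup */
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP:
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}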
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index f0cca4e..bd0b4ac 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -34,15 +34,15 @@ int get_hw_qnum(u16 queue, int *hwq_map)
{
switch (queue) {
case 0:
- return hwq_map[ATH9K_WME_AC_VO];
+ return hwq_map[WME_AC_VO];
case 1:
- return hwq_map[ATH9K_WME_AC_VI];
+ return hwq_map[WME_AC_VI];
case 2:
- return hwq_map[ATH9K_WME_AC_BE];
+ return hwq_map[WME_AC_BE];
case 3:
- return hwq_map[ATH9K_WME_AC_BK];
+ return hwq_map[WME_AC_BK];
default:
- return hwq_map[ATH9K_WME_AC_BE];
+ return hwq_map[WME_AC_BE];
}
}
@@ -187,6 +187,19 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
return htc_send(priv->htc, skb, epid, &tx_ctl);
}
+static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_sta *ista, u8 tid)
+{
+ bool ret = false;
+
+ spin_lock_bh(&priv->tx_lock);
+ if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP))
+ ret = true;
+ spin_unlock_bh(&priv->tx_lock);
+
+ return ret;
+}
+
void ath9k_tx_tasklet(unsigned long data)
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
@@ -216,8 +229,7 @@ void ath9k_tx_tasklet(unsigned long data)
/* Check if we need to start aggregation */
if (sta && conf_is_ht(&priv->hw->conf) &&
- (priv->op_flags & OP_TXAGGR)
- && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 *qc, tid;
struct ath9k_htc_sta *ista;
@@ -226,10 +238,11 @@ void ath9k_tx_tasklet(unsigned long data)
tid = qc[0] & 0xf;
ista = (struct ath9k_htc_sta *)sta->drv_priv;
- if ((tid < ATH9K_HTC_MAX_TID) &&
- ista->tid_state[tid] == AGGR_STOP) {
+ if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
ieee80211_start_tx_ba_session(sta, tid);
+ spin_lock_bh(&priv->tx_lock);
ista->tid_state[tid] = AGGR_PROGRESS;
+ spin_unlock_bh(&priv->tx_lock);
}
}
}
@@ -297,8 +310,7 @@ void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
}
-bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
- enum ath9k_tx_queue_subtype subtype)
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
{
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -404,9 +416,6 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
/* configure operational mode */
ath9k_hw_setopmode(ah);
- /* Handle any link-level address change. */
- ath9k_hw_setmac(ah, common->macaddr);
-
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -416,7 +425,7 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
- ath9k_hw_startpcureceive(priv->ah);
+ ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING));
priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}
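Since both the TX tasklet and the mac80211 callbacks now touch tid_state, every read and write of it is wrapped in priv->tx_lock with BH disabled; ath9k_htc_check_tx_aggr() is the locked query used before kicking off a BA session. A compact sketch of that check-then-start pattern (names illustrative, and the two locked sections from the hunk are folded into one):

static bool my_maybe_start_ba(struct my_priv *priv, struct my_sta *ista, u8 tid)
{
	bool start = false;

	spin_lock_bh(&priv->tx_lock);
	if (tid < MY_MAX_TID && ista->tid_state[tid] == AGGR_STOP) {
		ista->tid_state[tid] = AGGR_PROGRESS;
		start = true;
	}
	spin_unlock_bh(&priv->tx_lock);

	return start;	/* caller invokes ieee80211_start_tx_ba_session() */
}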
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 624422a..381da6c 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -128,6 +128,17 @@ static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
}
+static inline void ath9k_hw_procmibevent(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->ani_proc_mib_event(ah);
+}
+
+static inline void ath9k_hw_ani_monitor(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_ops(ah)->ani_monitor(ah, chan);
+}
+
/* Private hardware call ops */
/* PHY ops */
@@ -277,4 +288,9 @@ static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
}
+static inline void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
+{
+ ath9k_hw_private_ops(ah)->ani_reset(ah, is_scanning);
+}
+
#endif /* ATH9K_HW_OPS_H */
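ANI is now reached through the same ops-table indirection as the rest of the hardware layer: the public ops carry the MIB-event and monitor hooks, the private ops the reset, lower-immunity and cache-ini hooks, so the AR9002 and AR9003 families can plug in different implementations behind one call site. The pattern, reduced to an illustrative sketch:

struct my_hw;	/* stand-ins for ath_hw / ath9k_channel */
struct my_chan;

struct my_hw_ops {
	void (*ani_monitor)(struct my_hw *ah, struct my_chan *chan);
};

struct my_hw {
	struct my_hw_ops ops;	/* filled in by the family attach code */
};

static inline void my_hw_ani_monitor(struct my_hw *ah, struct my_chan *chan)
{
	ah->ops.ani_monitor(ah, chan);	/* family-specific body */
}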
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2adc7e7..3ed5c9e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,11 +23,6 @@
#include "rc.h"
#include "ar9003_mac.h"
-#define ATH9K_CLOCK_RATE_CCK 22
-#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
-#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
-#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
-
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
MODULE_AUTHOR("Atheros Communications");
@@ -80,6 +75,15 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
}
+static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ /* You will not have this callback if using the old ANI */
+ if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
+}
+
/********************/
/* Helper Functions */
/********************/
@@ -371,13 +375,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
ah->config.cck_trig_low = 100;
-
- /*
- * For now ANI is disabled for AR9003, it is still
- * being tested.
- */
- if (!AR_SREV_9300_20_OR_LATER(ah))
- ah->config.enable_ani = 1;
+ ah->config.enable_ani = true;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -390,6 +388,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ht_enable = 0;
ah->config.rx_intr_mitigation = true;
+ ah->config.pcieSerDesWrite = true;
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -427,7 +426,9 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->ah_flags = AH_USE_EEPROM;
ah->atim_window = 0;
- ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
+ ah->sta_id1_defaults =
+ AR_STA_ID1_CRPT_MIC_ENABLE |
+ AR_STA_ID1_MCAST_KSRCH;
ah->beacon_interval = 100;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
ah->slottime = (u32) -1;
@@ -565,28 +566,19 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ah->ani_function = ATH9K_ANI_ALL;
if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
ath9k_hw_init_mode_regs(ah);
/*
- * Configire PCIE after Ini init. SERDES values now come from ini file
- * This enables PCIe low power mode.
+ * Read back AR_WA into a permanent copy and set bits 14 and 17.
+ * We need to do this to avoid RMW of this register. We cannot
+ * read the reg when chip is asleep.
*/
- if (AR_SREV_9300_20_OR_LATER(ah)) {
- u32 regval;
- unsigned int i;
-
- /* Set Bits 16 and 17 in the AR_WA register. */
- regval = REG_READ(ah, AR_WA);
- regval |= 0x00030000;
- REG_WRITE(ah, AR_WA, regval);
-
- for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) {
- REG_WRITE(ah,
- INI_RA(&ah->iniPcieSerdesLowPower, i, 0),
- INI_RA(&ah->iniPcieSerdesLowPower, i, 1));
- }
- }
+ ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+ AR_WA_ASPM_TIMER_BASED_DISABLE);
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, 0, 0);
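Instead of read-modify-writing AR_WA during SerDes setup, the register is read once here while the chip is known to be awake, bits 14 and 17 (AR_WA_D3_L1_DISABLE, AR_WA_ASPM_TIMER_BASED_DISABLE) are ORed in, and the cached value in ah->WARegVal is simply rewritten from then on, because AR_WA cannot be read while the chip sleeps. The resulting write pattern, sketched with the constants added to reg.h below:

/* once, at init, with the chip awake */
ah->WARegVal = REG_READ(ah, AR_WA) |
	       AR_WA_D3_L1_DISABLE | AR_WA_ASPM_TIMER_BASED_DISABLE;

/* before any wakeup or reset of an AR9003-family chip */
REG_WRITE(ah, AR_WA, ah->WARegVal);
udelay(10);

/* when entering full or network sleep, drop the D3/L1 disable bit */
REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);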
@@ -1007,6 +999,11 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
ENABLE_REGWRITE_BUFFER(ah);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ udelay(10);
+ }
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
@@ -1061,6 +1058,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
{
ENABLE_REGWRITE_BUFFER(ah);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ udelay(10);
+ }
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
@@ -1068,6 +1070,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
REG_WRITE(ah, AR_RC, AR_RC_AHB);
REG_WRITE(ah, AR_RTC_RESET, 0);
+ udelay(2);
REGWRITE_BUFFER_FLUSH(ah);
DISABLE_REGWRITE_BUFFER(ah);
@@ -1097,6 +1100,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ udelay(10);
+ }
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
@@ -1260,7 +1268,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* For chips on which RTC reset is done, save TSF before it gets cleared */
- if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ if (AR_SREV_9100(ah) ||
+ (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -1292,7 +1301,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
}
/* Restore TSF */
- if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ if (tsf)
ath9k_hw_settsf64(ah, tsf);
if (AR_SREV_9280_10_OR_LATER(ah))
@@ -1305,6 +1314,17 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
+ /*
+ * Some AR91xx SoC devices frequently fail to accept TSF writes
+ * right after the chip reset. When that happens, write a new
+ * value after the initvals have been applied, with an offset
+ * based on the measured time difference.

+ */
+ if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
+ tsf += 1500;
+ ath9k_hw_settsf64(ah, tsf);
+ }
+
/* Setup MFP options for CCMP */
if (AR_SREV_9280_20_OR_LATER(ah)) {
/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
@@ -1365,6 +1385,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_resettxqueue(ah, i);
ath9k_hw_init_interrupt_masks(ah, ah->opmode);
+ ath9k_hw_ani_cache_ini_regs(ah);
ath9k_hw_init_qos(ah);
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
@@ -1489,7 +1510,7 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
}
EXPORT_SYMBOL(ath9k_hw_keyreset);
-bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
+static bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
{
u32 macHi, macLo;
u32 unicast_flag = AR_KEYTABLE_VALID;
@@ -1527,7 +1548,6 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
return true;
}
-EXPORT_SYMBOL(ath9k_hw_keysetmac);
bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
const struct ath9k_keyval *k,
@@ -1728,17 +1748,6 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
}
EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
-bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
-{
- if (entry < ah->caps.keycache_size) {
- u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
- if (val & AR_KEYTABLE_VALID)
- return true;
- }
- return false;
-}
-EXPORT_SYMBOL(ath9k_hw_keyisvalid);
-
/******************************/
/* Power Management (Chipset) */
/******************************/
@@ -1765,6 +1774,11 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
REG_CLR_BIT(ah, (AR_RTC_RESET),
AR_RTC_RESET_EN);
}
+
+ /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_WA,
+ ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
@@ -1791,6 +1805,10 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
AR_RTC_FORCE_WAKE_EN);
}
}
+
+ /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
@@ -1798,6 +1816,12 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
u32 val;
int i;
+ /* Set Bits 14 and 17 of AR_WA before powering on the chip. */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ udelay(10);
+ }
+
if (setChip) {
if ((REG_READ(ah, AR_RTC_STATUS) &
AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
@@ -2152,6 +2176,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9271(ah))
pCap->num_gpio_pins = AR9271_NUM_GPIO;
+ else if (AR_DEVID_7010(ah))
+ pCap->num_gpio_pins = AR7010_NUM_GPIO;
else if (AR_SREV_9285_10_OR_LATER(ah))
pCap->num_gpio_pins = AR9285_NUM_GPIO;
else if (AR_SREV_9280_10_OR_LATER(ah))
@@ -2234,6 +2260,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->rx_status_len = sizeof(struct ar9003_rxs);
pCap->tx_desc_len = sizeof(struct ar9003_txc);
pCap->txs_len = sizeof(struct ar9003_txs);
+ if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+ pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
} else {
pCap->tx_desc_len = sizeof(struct ath_desc);
if (AR_SREV_9280_20(ah) &&
@@ -2252,98 +2280,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
return 0;
}
-bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
- u32 capability, u32 *result)
-{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
- switch (type) {
- case ATH9K_CAP_CIPHER:
- switch (capability) {
- case ATH9K_CIPHER_AES_CCM:
- case ATH9K_CIPHER_AES_OCB:
- case ATH9K_CIPHER_TKIP:
- case ATH9K_CIPHER_WEP:
- case ATH9K_CIPHER_MIC:
- case ATH9K_CIPHER_CLR:
- return true;
- default:
- return false;
- }
- case ATH9K_CAP_TKIP_MIC:
- switch (capability) {
- case 0:
- return true;
- case 1:
- return (ah->sta_id1_defaults &
- AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
- false;
- }
- case ATH9K_CAP_TKIP_SPLIT:
- return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
- false : true;
- case ATH9K_CAP_MCAST_KEYSRCH:
- switch (capability) {
- case 0:
- return true;
- case 1:
- if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
- return false;
- } else {
- return (ah->sta_id1_defaults &
- AR_STA_ID1_MCAST_KSRCH) ? true :
- false;
- }
- }
- return false;
- case ATH9K_CAP_TXPOW:
- switch (capability) {
- case 0:
- return 0;
- case 1:
- *result = regulatory->power_limit;
- return 0;
- case 2:
- *result = regulatory->max_power_level;
- return 0;
- case 3:
- *result = regulatory->tp_scale;
- return 0;
- }
- return false;
- case ATH9K_CAP_DS:
- return (AR_SREV_9280_20_OR_LATER(ah) &&
- (ah->eep_ops->get_eeprom(ah, EEP_RC_CHAIN_MASK) == 1))
- ? false : true;
- default:
- return false;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_getcapability);
-
-bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
- u32 capability, u32 setting, int *status)
-{
- switch (type) {
- case ATH9K_CAP_TKIP_MIC:
- if (setting)
- ah->sta_id1_defaults |=
- AR_STA_ID1_CRPT_MIC_ENABLE;
- else
- ah->sta_id1_defaults &=
- ~AR_STA_ID1_CRPT_MIC_ENABLE;
- return true;
- case ATH9K_CAP_MCAST_KEYSRCH:
- if (setting)
- ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
- else
- ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH;
- return true;
- default:
- return false;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_setcapability);
-
/****************************/
/* GPIO / RFKILL / Antennae */
/****************************/
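With ath9k_hw_getcapability()/setcapability() gone, callers read or set the underlying state directly: the clamped TX power comes from the shared regulatory struct, split-MIC detection checks misc_mode for AR_PCU_MIC_NEW_LOC_ENA, and multicast key search plus TKIP MIC are simply part of sta_id1_defaults at init. The readback that replaces the old ATH9K_CAP_TXPOW query, as used by the main.c and htc_drv_main.c hunks in this patch:

/* set the limit, then read back the value the hardware layer clamped to */
ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;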
@@ -2382,8 +2318,15 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
BUG_ON(gpio >= ah->caps.num_gpio_pins);
- gpio_shift = gpio << 1;
+ if (AR_DEVID_7010(ah)) {
+ gpio_shift = gpio;
+ REG_RMW(ah, AR7010_GPIO_OE,
+ (AR7010_GPIO_OE_AS_INPUT << gpio_shift),
+ (AR7010_GPIO_OE_MASK << gpio_shift));
+ return;
+ }
+ gpio_shift = gpio << 1;
REG_RMW(ah,
AR_GPIO_OE_OUT,
(AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
@@ -2399,7 +2342,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
if (gpio >= ah->caps.num_gpio_pins)
return 0xffffffff;
- if (AR_SREV_9300_20_OR_LATER(ah))
+ if (AR_DEVID_7010(ah)) {
+ u32 val;
+ val = REG_READ(ah, AR7010_GPIO_IN);
+ return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
+ } else if (AR_SREV_9300_20_OR_LATER(ah))
return MS_REG_READ(AR9300, gpio) != 0;
else if (AR_SREV_9271(ah))
return MS_REG_READ(AR9271, gpio) != 0;
@@ -2419,10 +2366,16 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
{
u32 gpio_shift;
- ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
+ if (AR_DEVID_7010(ah)) {
+ gpio_shift = gpio;
+ REG_RMW(ah, AR7010_GPIO_OE,
+ (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
+ (AR7010_GPIO_OE_MASK << gpio_shift));
+ return;
+ }
+ ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
gpio_shift = 2 * gpio;
-
REG_RMW(ah,
AR_GPIO_OE_OUT,
(AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
@@ -2432,6 +2385,13 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
+ if (AR_DEVID_7010(ah)) {
+ val = val ? 0 : 1;
+ REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
+ AR_GPIO_BIT(gpio));
+ return;
+ }
+
if (AR_SREV_9271(ah))
val = ~val;
@@ -2537,12 +2497,6 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
}
EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
-void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
-{
- memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN);
-}
-EXPORT_SYMBOL(ath9k_hw_setmac);
-
void ath9k_hw_setopmode(struct ath_hw *ah)
{
ath9k_hw_set_operating_mode(ah, ah->opmode);
@@ -2615,21 +2569,6 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
}
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
-/*
- * Extend 15-bit time stamp from rx descriptor to
- * a full 64-bit TSF using the current h/w TSF.
-*/
-u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
-{
- u64 tsf;
-
- tsf = ath9k_hw_gettsf64(ah);
- if ((tsf & 0x7fff) < rstamp)
- tsf -= 0x8000;
- return (tsf & ~0x7fff) | rstamp;
-}
-EXPORT_SYMBOL(ath9k_hw_extend_tsf);
-
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
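The TSF handling in ath9k_hw_reset() above now also covers AR9100 SoCs, which lose the TSF across the RTC reset and sometimes refuse the first write issued right afterwards; in that case the value is written again after the initvals with a roughly 1.5 ms fudge for the elapsed time. Condensed into a sketch:

u64 tsf = ath9k_hw_gettsf64(ah);	/* saved before the RTC reset */
/* ... chip reset, initvals ... */
ath9k_hw_settsf64(ah, tsf);
if (AR_SREV_9100(ah) && ath9k_hw_gettsf64(ah) < tsf)
	ath9k_hw_settsf64(ah, tsf + 1500);	/* first write did not stick */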
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 88bf2fc..bb99e2e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -158,6 +158,9 @@
#define ATH9K_HW_RX_HP_QDEPTH 16
#define ATH9K_HW_RX_LP_QDEPTH 128
+#define PAPRD_GAIN_TABLE_ENTRIES 32
+#define PAPRD_TABLE_SZ 24
+
enum ath_ini_subsys {
ATH_INI_PRE = 0,
ATH_INI_CORE,
@@ -200,15 +203,7 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_LDPC = BIT(19),
ATH9K_HW_CAP_FASTCLOCK = BIT(20),
ATH9K_HW_CAP_SGI_20 = BIT(21),
-};
-
-enum ath9k_capability_type {
- ATH9K_CAP_CIPHER = 0,
- ATH9K_CAP_TKIP_MIC,
- ATH9K_CAP_TKIP_SPLIT,
- ATH9K_CAP_TXPOW,
- ATH9K_CAP_MCAST_KEYSRCH,
- ATH9K_CAP_DS
+ ATH9K_HW_CAP_PAPRD = BIT(22),
};
struct ath9k_hw_capabilities {
@@ -238,8 +233,9 @@ struct ath9k_ops_config {
int sw_beacon_response_time;
int additional_swba_backoff;
int ack_6mb;
- int cwm_ignore_extcca;
+ u32 cwm_ignore_extcca;
u8 pcie_powersave_enable;
+ bool pcieSerDesWrite;
u8 pcie_clock_req;
u32 pcie_waen;
u8 analog_shiftreg;
@@ -266,6 +262,7 @@ struct ath9k_ops_config {
int spurmode;
u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
+ u16 ani_poll_interval; /* ANI poll interval in ms */
};
enum ath9k_int {
@@ -359,6 +356,9 @@ struct ath9k_channel {
int8_t iCoff;
int8_t qCoff;
int16_t rawNoiseFloor;
+ bool paprd_done;
+ u16 small_signal_gain[AR9300_MAX_CHAINS];
+ u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
};
#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
@@ -511,6 +511,17 @@ struct ath_gen_timer_table {
* @setup_calibration: set up calibration
* @iscal_supported: used to query if a type of calibration is supported
* @loadnf: load noise floor read from each chain on the CCA registers
+ *
+ * @ani_reset: reset ANI parameters to default values
+ * @ani_lower_immunity: lower the noise immunity level. The level controls
+ * the power-based packet detection on hardware. If a power jump is
+ * detected the adapter takes it as an indication that a packet has
+ * arrived. The level ranges from 0-5. Each level corresponds to a
+ * few dB more of noise immunity. If you have a strong time-varying
+ * interference that is causing false detections (OFDM timing errors or
+ * CCK timing errors) the level can be increased.
+ * @ani_cache_ini_regs: cache the values for ANI from the initial
+ * register settings through the register initialization.
*/
struct ath_hw_private_ops {
/* Calibration ops */
@@ -554,6 +565,11 @@ struct ath_hw_private_ops {
int param);
void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+ /* ANI */
+ void (*ani_reset)(struct ath_hw *ah, bool is_scanning);
+ void (*ani_lower_immunity)(struct ath_hw *ah);
+ void (*ani_cache_ini_regs)(struct ath_hw *ah);
};
/**
@@ -564,6 +580,11 @@ struct ath_hw_private_ops {
*
* @config_pci_powersave:
* @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ *
+ * @ani_proc_mib_event: process MIB events; these fire when specific ANI
+ * thresholds are reached or counters have overflowed.
+ * @ani_monitor: called periodically by the core driver to collect
+ * MIB stats and adjust ANI if specific thresholds have been reached.
*/
struct ath_hw_ops {
void (*config_pci_powersave)(struct ath_hw *ah,
@@ -604,6 +625,9 @@ struct ath_hw_ops {
u32 burstDuration);
void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
u32 vmf);
+
+ void (*ani_proc_mib_event)(struct ath_hw *ah);
+ void (*ani_monitor)(struct ath_hw *ah, struct ath9k_channel *chan);
};
struct ath_hw {
@@ -793,6 +817,15 @@ struct ath_hw {
u32 bb_watchdog_last_status;
u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+
+ u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES];
+ u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES];
+ /*
+ * Store the permanent value of Reg 0x4004 in WARegVal
+ * so we don't have to R/M/W. We should not be reading
+ * this register when in sleep states.
+ */
+ u32 WARegVal;
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -822,19 +855,13 @@ int ath9k_hw_init(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange);
int ath9k_hw_fill_cap_info(struct ath_hw *ah);
-bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
- u32 capability, u32 *result);
-bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
- u32 capability, u32 setting, int *status);
u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
/* Key Cache Management */
bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
-bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac);
bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
const struct ath9k_keyval *k,
const u8 *mac);
-bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry);
/* GPIO / RFKILL / Antennae */
void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
@@ -860,7 +887,6 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
bool ath9k_hw_phy_disable(struct ath_hw *ah);
bool ath9k_hw_disable(struct ath_hw *ah);
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
-void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
void ath9k_hw_setopmode(struct ath_hw *ah);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
void ath9k_hw_setbssidmask(struct ath_hw *ah);
@@ -869,7 +895,6 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
-u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
@@ -922,6 +947,15 @@ void ar9003_hw_set_nf_limits(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
+void ar9003_paprd_enable(struct ath_hw *ah, bool val);
+void ar9003_paprd_populate_single_table(struct ath_hw *ah,
+ struct ath9k_channel *chan, int chain);
+int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
+ int chain);
+int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
+int ar9003_paprd_init_table(struct ath_hw *ah);
+bool ar9003_paprd_is_done(struct ath_hw *ah);
+void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
/* Hardware family op attach helpers */
void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
@@ -934,8 +968,24 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
void ar9002_hw_attach_ops(struct ath_hw *ah);
void ar9003_hw_attach_ops(struct ath_hw *ah);
+/*
+ * ANI work can be shared between all families, but a next-
+ * generation implementation of ANI will be used only for AR9003
+ * for now, as the other families still need to be tested with the same
+ * next generation ANI. Feel free to start testing it though for the
+ * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani.
+ */
+extern int modparam_force_new_ani;
+void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah);
+void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah);
+
#define ATH_PCIE_CAP_LINK_CTRL 0x70
#define ATH_PCIE_CAP_LINK_L0S 1
#define ATH_PCIE_CAP_LINK_L1 2
+#define ATH9K_CLOCK_RATE_CCK 22
+#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
+#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
+#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
+
#endif
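The two ANI flavours are selected when the family ops are attached; modparam_force_new_ani lets the older families opt in to the AR9003-style implementation for testing. An illustrative sketch of how an attach routine might choose (the actual attach code lives in the per-family files and is not part of this diff):

if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani)
	ath9k_hw_attach_ani_ops_new(ah);
else
	ath9k_hw_attach_ani_ops_old(ah);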
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 18d76ed..8700e3d 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -33,6 +33,10 @@ int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+int led_blink = 1;
+module_param_named(blink, led_blink, int, 0444);
+MODULE_PARM_DESC(blink, "Enable LED blink on activity");
+
/* We use the hw_value as an index into our private channel structure */
#define CHAN2G(_freq, _idx) { \
@@ -379,36 +383,14 @@ static void ath9k_init_crypto(struct ath_softc *sc)
for (i = 0; i < common->keymax; i++)
ath9k_hw_keyreset(sc->sc_ah, (u16) i);
- if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
- }
-
/*
* Check whether the separate key cache entries
* are required to handle both tx+rx MIC keys.
* With split mic keys the number of stations is limited
* to 27 otherwise 59.
*/
- if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
+ if (!(sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA))
common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
- 1, 1, NULL);
-
}
static int ath9k_init_btcoex(struct ath_softc *sc)
@@ -426,7 +408,7 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
r = ath_init_btcoex_timer(sc);
if (r)
return -1;
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ qnum = sc->tx.hwq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
@@ -463,23 +445,23 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ if (!ath_tx_setup(sc, WME_AC_BK)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BK traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ if (!ath_tx_setup(sc, WME_AC_BE)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BE traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ if (!ath_tx_setup(sc, WME_AC_VI)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VI traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ if (!ath_tx_setup(sc, WME_AC_VO)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VO traffic\n");
goto err;
@@ -736,6 +718,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
goto error_world;
}
+ INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
sc->wiphy_scheduler_int = msecs_to_jiffies(500);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 0e425cb..e955bb9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "hw-ops.h"
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
struct ath9k_tx_queue_info *qi)
@@ -554,8 +555,13 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REGWRITE_BUFFER_FLUSH(ah);
DISABLE_REGWRITE_BUFFER(ah);
- /* cwmin and cwmax should be 0 for beacon queue */
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ /*
+ * cwmin and cwmax should be 0 for beacon queue
+ * but not for IBSS as we would create an imbalance
+ * on beaconing fairness for participating nodes.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah) &&
+ ah->opmode != NL80211_IFTYPE_ADHOC) {
REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
| SM(0, AR_D_LCL_IFS_CWMAX)
| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
@@ -756,11 +762,11 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);
-void ath9k_hw_startpcureceive(struct ath_hw *ah)
+void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
ath9k_enable_mib_counters(ah);
- ath9k_ani_reset(ah);
+ ath9k_ani_reset(ah, is_scanning);
REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 00f3e0c..7559fb2b 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -577,13 +577,8 @@ enum ath9k_tx_queue {
#define ATH9K_NUM_TX_QUEUES 10
-enum ath9k_tx_queue_subtype {
- ATH9K_WME_AC_BK = 0,
- ATH9K_WME_AC_BE,
- ATH9K_WME_AC_VI,
- ATH9K_WME_AC_VO,
- ATH9K_WME_UPSD
-};
+/* Used as a queue subtype instead of a WMM AC */
+#define ATH9K_WME_UPSD 4
enum ath9k_tx_queue_flags {
TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
@@ -617,7 +612,7 @@ enum ath9k_pkt_type {
struct ath9k_tx_queue_info {
u32 tqi_ver;
enum ath9k_tx_queue tqi_type;
- enum ath9k_tx_queue_subtype tqi_subtype;
+ int tqi_subtype;
enum ath9k_tx_queue_flags tqi_qflags;
u32 tqi_priority;
u32 tqi_aifs;
@@ -715,7 +710,7 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags);
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
-void ath9k_hw_startpcureceive(struct ath_hw *ah);
+void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_stoppcurecv(struct ath_hw *ah);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b8b76dd..efbf535 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -51,13 +51,11 @@ static void ath_cache_conf_rate(struct ath_softc *sc,
static void ath_update_txpow(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- u32 txpow;
if (sc->curtxpow != sc->config.txpowlimit) {
ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
/* read back in case value is clamped */
- ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
- sc->curtxpow = txpow;
+ sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
}
}
@@ -232,6 +230,114 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
return r;
}
+static void ath_paprd_activate(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ int chain;
+
+ if (!ah->curchan->paprd_done)
+ return;
+
+ ath9k_ps_wakeup(sc);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.tx_chainmask & BIT(chain)))
+ continue;
+
+ ar9003_paprd_populate_single_table(ah, ah->curchan, chain);
+ }
+
+ ar9003_paprd_enable(ah, true);
+ ath9k_ps_restore(sc);
+}
+
+void ath_paprd_calibrate(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ struct ieee80211_tx_info *tx_info;
+ int band = hw->conf.channel->band;
+ struct ieee80211_supported_band *sband = &sc->sbands[band];
+ struct ath_tx_control txctl;
+ int qnum, ftype;
+ int chain_ok = 0;
+ int chain;
+ int len = 1800;
+ int time_left;
+ int i;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ skb_put(skb, len);
+ memset(skb->data, 0, len);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
+ hdr->frame_control = cpu_to_le16(ftype);
+ hdr->duration_id = 10;
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ memset(&txctl, 0, sizeof(txctl));
+ qnum = sc->tx.hwq_map[WME_AC_BE];
+ txctl.txq = &sc->tx.txq[qnum];
+
+ ath9k_ps_wakeup(sc);
+ ar9003_paprd_init_table(ah);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.tx_chainmask & BIT(chain)))
+ continue;
+
+ chain_ok = 0;
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = band;
+
+ for (i = 0; i < 4; i++) {
+ tx_info->control.rates[i].idx = sband->n_bitrates - 1;
+ tx_info->control.rates[i].count = 6;
+ }
+
+ init_completion(&sc->paprd_complete);
+ ar9003_paprd_setup_gain_table(ah, chain);
+ txctl.paprd = BIT(chain);
+ if (ath_tx_start(hw, skb, &txctl) != 0)
+ break;
+
+ time_left = wait_for_completion_timeout(&sc->paprd_complete,
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
+ if (!time_left) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "Timeout waiting for paprd training on "
+ "TX chain %d\n",
+ chain);
+ goto fail_paprd;
+ }
+
+ if (!ar9003_paprd_is_done(ah))
+ break;
+
+ if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0)
+ break;
+
+ chain_ok = 1;
+ }
+ kfree_skb(skb);
+
+ if (chain_ok) {
+ ah->curchan->paprd_done = true;
+ ath_paprd_activate(sc);
+ }
+
+fail_paprd:
+ ath9k_ps_restore(sc);
+}
+
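ath_paprd_calibrate() sends one self-addressed null-data training frame per active TX chain on the BE queue (flagged via txctl.paprd), waits up to ATH_PAPRD_TIMEOUT on sc->paprd_complete, and only if every active chain yields a distortion curve marks the channel paprd_done and pushes the tables via ath_paprd_activate(). The completion is presumably signalled from the TX completion path, which is not part of this hunk; a hedged sketch of that counterpart:

/* assumed TX-completion counterpart: wake the calibration worker once
 * the frame that carried the PAPRD training flag has completed */
if (frame_was_paprd_training)	/* however the descriptor is flagged */
	complete(&sc->paprd_complete);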
/*
* This routine performs the periodic noise floor calibration function
* that is used to adjust and optimize the chip performance. This
@@ -285,7 +391,8 @@ void ath_ani_calibrate(unsigned long data)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ if ((timestamp - common->ani.checkani_timer) >=
+ ah->config.ani_poll_interval) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -326,23 +433,37 @@ set_timer:
*/
cal_interval = ATH_LONG_CALINTERVAL;
if (sc->sc_ah->config.enable_ani)
- cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+ cal_interval = min(cal_interval,
+ (u32)ah->config.ani_poll_interval);
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) &&
+ !(sc->sc_flags & SC_OP_SCANNING)) {
+ if (!sc->sc_ah->curchan->paprd_done)
+ ieee80211_queue_work(sc->hw, &sc->paprd_work);
+ else
+ ath_paprd_activate(sc);
+ }
}
static void ath_start_ani(struct ath_common *common)
{
+ struct ath_hw *ah = common->ah;
unsigned long timestamp = jiffies_to_msecs(jiffies);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ if (!(sc->sc_flags & SC_OP_ANI_RUN))
+ return;
common->ani.longcal_timer = timestamp;
common->ani.shortcal_timer = timestamp;
common->ani.checkani_timer = timestamp;
mod_timer(&common->ani.timer,
- jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+ jiffies +
+ msecs_to_jiffies((u32)ah->config.ani_poll_interval));
}
/*
@@ -650,11 +771,13 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
/* Reset rssi stats */
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
} else {
ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
common->curaid = 0;
/* Stop ANI */
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
del_timer_sync(&common->ani.timer);
}
}
@@ -804,25 +927,25 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
return r;
}
-int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
+static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
int qnum;
switch (queue) {
case 0:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
+ qnum = sc->tx.hwq_map[WME_AC_VO];
break;
case 1:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
+ qnum = sc->tx.hwq_map[WME_AC_VI];
break;
case 2:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
+ qnum = sc->tx.hwq_map[WME_AC_BE];
break;
case 3:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
+ qnum = sc->tx.hwq_map[WME_AC_BK];
break;
default:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
+ qnum = sc->tx.hwq_map[WME_AC_BE];
break;
}
@@ -834,16 +957,16 @@ int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
int qnum;
switch (queue) {
- case ATH9K_WME_AC_VO:
+ case WME_AC_VO:
qnum = 0;
break;
- case ATH9K_WME_AC_VI:
+ case WME_AC_VI:
qnum = 1;
break;
- case ATH9K_WME_AC_BE:
+ case WME_AC_BE:
qnum = 2;
break;
- case ATH9K_WME_AC_BK:
+ case WME_AC_BK:
qnum = 3;
break;
default:
@@ -1125,8 +1248,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
aphy->state = ATH_WIPHY_INACTIVE;
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
+ if (led_blink)
+ cancel_delayed_work_sync(&sc->ath_led_blink_work);
+
cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_work_sync(&sc->paprd_work);
if (!sc->num_sec_wiphy) {
cancel_delayed_work_sync(&sc->wiphy_work);
@@ -1257,8 +1383,10 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MONITOR)
+ vif->type == NL80211_IFTYPE_MONITOR) {
+ sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
+ }
out:
mutex_unlock(&sc->mutex);
@@ -1279,6 +1407,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
/* Stop ANI */
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
del_timer_sync(&common->ani.timer);
/* Reclaim beacon resources */
@@ -1555,7 +1684,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
- if ((qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret)
+ if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);
@@ -1769,6 +1898,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
struct ath_softc *sc = aphy->sc;
int ret = 0;
+ local_bh_disable();
+
switch (action) {
case IEEE80211_AMPDU_RX_START:
if (!(sc->sc_flags & SC_OP_RXAGGR))
@@ -1798,6 +1929,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
"Unknown AMPDU action\n");
}
+ local_bh_enable();
+
return ret;
}
@@ -1842,6 +1975,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
ath9k_wiphy_pause_all_forced(sc, aphy);
sc->sc_flags |= SC_OP_SCANNING;
del_timer_sync(&common->ani.timer);
+ cancel_work_sync(&sc->paprd_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
mutex_unlock(&sc->mutex);
}
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 1ec836c..257b10b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
{ 0 }
};
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 02b6052..600ee0b 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1203,11 +1203,8 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
if (sta->ht_cap.ht_supported) {
caps = WLAN_RC_HT_FLAG;
- if (sc->sc_ah->caps.tx_chainmask != 1 &&
- ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) {
- if (sta->ht_cap.mcs.rx_mask[1])
- caps |= WLAN_RC_DS_FLAG;
- }
+ if (sta->ht_cap.mcs.rx_mask[1])
+ caps |= WLAN_RC_DS_FLAG;
if (is_cw40)
caps |= WLAN_RC_40_FLAG;
if (is_sgi)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index d373364..da0cfe9 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -116,9 +116,6 @@ static void ath_opmode_init(struct ath_softc *sc)
/* configure operational mode */
ath9k_hw_setopmode(ah);
- /* Handle any link-level address change. */
- ath9k_hw_setmac(ah, common->macaddr);
-
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -295,7 +292,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(sc->sc_ah);
+ ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
}
static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -501,7 +498,7 @@ int ath_startrecv(struct ath_softc *sc)
start_recv:
spin_unlock_bh(&sc->rx.rxbuflock);
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(ah);
+ ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));
return 0;
}
@@ -1002,8 +999,6 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
struct ieee80211_rx_status *rx_status,
bool *decrypt_error)
{
- struct ath_hw *ah = common->ah;
-
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
/*
@@ -1018,7 +1013,6 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
if (ath9k_process_rate(common, hw, rx_stats, rx_status))
return -EINVAL;
- rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
rx_status->band = hw->conf.channel->band;
rx_status->freq = hw->conf.channel->center_freq;
rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
@@ -1100,6 +1094,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int dma_type;
u8 rx_status_len = ah->caps.rx_status_len;
+ u64 tsf = 0;
+ u32 tsf_lower = 0;
if (edma)
dma_type = DMA_BIDIRECTIONAL;
@@ -1109,6 +1105,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
spin_lock_bh(&sc->rx.rxbuflock);
+ tsf = ath9k_hw_gettsf64(ah);
+ tsf_lower = tsf & 0xffffffff;
+
do {
/* If handling rx interrupt and flush is in progress => exit */
if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
@@ -1141,6 +1140,15 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (flush)
goto requeue;
+ rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
+ if (rs.rs_tstamp > tsf_lower &&
+ unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
+ rxs->mactime -= 0x100000000ULL;
+
+ if (rs.rs_tstamp < tsf_lower &&
+ unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
+ rxs->mactime += 0x100000000ULL;
+
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
if (retval)
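Rather than extending a 15-bit descriptor timestamp with ath9k_hw_extend_tsf(), the RX tasklet now reads the full 64-bit TSF once per batch and splices the 32-bit rs_tstamp into it, nudging the result by 2^32 when the timestamp and the TSF snapshot sit on opposite sides of a wrap (0x10000000 is the slack window used above). The same arithmetic as a standalone, compilable sketch:

#include <stdint.h>

static uint64_t extend_rx_tstamp(uint64_t tsf, uint32_t rs_tstamp)
{
	uint32_t tsf_lower = (uint32_t)tsf;
	uint64_t mactime = (tsf & ~0xffffffffULL) | rs_tstamp;

	if (rs_tstamp > tsf_lower && rs_tstamp - tsf_lower > 0x10000000)
		mactime -= 0x100000000ULL;	/* frame predates the wrap */
	if (rs_tstamp < tsf_lower && tsf_lower - rs_tstamp > 0x10000000)
		mactime += 0x100000000ULL;	/* frame is from after the wrap */

	return mactime;
}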
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 3e3ccef..633e3d9 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -704,6 +704,11 @@
#define AR_WA_BIT7 (1 << 7)
#define AR_WA_BIT23 (1 << 23)
#define AR_WA_D3_L1_DISABLE (1 << 14)
+#define AR_WA_D3_TO_L1_DISABLE_REAL (1 << 16)
+#define AR_WA_ASPM_TIMER_BASED_DISABLE (1 << 17)
+#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
+#define AR_WA_ANALOG_SHIFT (1 << 20)
+#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
@@ -877,6 +882,7 @@
#define AR_SREV_9271_11(_ah) \
(AR_SREV_9271(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
+
#define AR_SREV_9300(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
#define AR_SREV_9300_20(_ah) \
@@ -891,6 +897,10 @@
(AR_SREV_9285_12_OR_LATER(_ah) && \
((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
+#define AR_DEVID_7010(_ah) \
+ (((_ah)->hw_version.devid == 0x7010) || \
+ ((_ah)->hw_version.devid == 0x9018))
+
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
#define AR_RAD2133_SREV_MAJOR 0xd0
@@ -988,6 +998,7 @@ enum {
#define AR9287_NUM_GPIO 11
#define AR9271_NUM_GPIO 16
#define AR9300_NUM_GPIO 17
+#define AR7010_NUM_GPIO 16
#define AR_GPIO_IN_OUT 0x4048
#define AR_GPIO_IN_VAL 0x0FFFC000
@@ -1002,6 +1013,8 @@ enum {
#define AR9271_GPIO_IN_VAL_S 16
#define AR9300_GPIO_IN_VAL 0x0001FFFF
#define AR9300_GPIO_IN_VAL_S 0
+#define AR7010_GPIO_IN_VAL 0x0000FFFF
+#define AR7010_GPIO_IN_VAL_S 0
#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
#define AR_GPIO_OE_OUT_DRV 0x3
@@ -1010,6 +1023,21 @@ enum {
#define AR_GPIO_OE_OUT_DRV_HI 0x2
#define AR_GPIO_OE_OUT_DRV_ALL 0x3
+#define AR7010_GPIO_OE 0x52000
+#define AR7010_GPIO_OE_MASK 0x1
+#define AR7010_GPIO_OE_AS_OUTPUT 0x0
+#define AR7010_GPIO_OE_AS_INPUT 0x1
+#define AR7010_GPIO_IN 0x52004
+#define AR7010_GPIO_OUT 0x52008
+#define AR7010_GPIO_SET 0x5200C
+#define AR7010_GPIO_CLEAR 0x52010
+#define AR7010_GPIO_INT 0x52014
+#define AR7010_GPIO_INT_TYPE 0x52018
+#define AR7010_GPIO_INT_POLARITY 0x5201C
+#define AR7010_GPIO_PENDING 0x52020
+#define AR7010_GPIO_INT_MASK 0x52024
+#define AR7010_GPIO_FUNCTION 0x52028
+
#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)
#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
#define AR_GPIO_INTR_POL_VAL_S 0
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 105ad40..89423ca 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -219,7 +219,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
info->control.rates[1].idx = -1;
memset(&txctl, 0, sizeof(struct ath_tx_control));
- txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
+ txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 7547c8f..c3681a1 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -328,6 +328,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
+ struct ieee80211_tx_rate rates[4];
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -335,6 +336,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
tx_info = IEEE80211_SKB_CB(skb);
hw = bf->aphy->hw;
+ memcpy(rates, tx_info->control.rates, sizeof(rates));
+
rcu_read_lock();
/* XXX: use ieee80211_find_sta! */
@@ -375,6 +378,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
txfail = txpending = 0;
bf_next = bf->bf_next;
+ skb = bf->bf_mpdu;
+ tx_info = IEEE80211_SKB_CB(skb);
+
if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
/* transmit completion, subframe is
* acked by block ack */
@@ -428,6 +434,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
+ memcpy(tx_info->control.rates, rates, sizeof(rates));
ath_tx_rc_status(bf, ts, nbad, txok, true);
rc_update = false;
} else {
@@ -941,6 +948,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
if (!ATH_TXQ_SETUP(sc, qnum)) {
struct ath_txq *txq = &sc->tx.txq[qnum];
+ txq->axq_class = subtype;
txq->axq_qnum = qnum;
txq->axq_link = NULL;
INIT_LIST_HEAD(&txq->axq_q);
@@ -958,32 +966,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
return &sc->tx.txq[qnum];
}
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
-{
- int qnum;
-
- switch (qtype) {
- case ATH9K_TX_QUEUE_DATA:
- if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "HAL AC %u out of range, max %zu!\n",
- haltype, ARRAY_SIZE(sc->tx.hwq_map));
- return -1;
- }
- qnum = sc->tx.hwq_map[haltype];
- break;
- case ATH9K_TX_QUEUE_BEACON:
- qnum = sc->beacon.beaconq;
- break;
- case ATH9K_TX_QUEUE_CAB:
- qnum = sc->beacon.cabq->axq_qnum;
- break;
- default:
- qnum = -1;
- }
- return qnum;
-}
-
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *qinfo)
{
@@ -1662,12 +1644,15 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf)) {
+ if (!txctl->paprd && conf_is_ht(&hw->conf)) {
bf->bf_state.bf_type |= BUF_HT;
if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
use_ldpc = true;
}
+ bf->bf_state.bfs_paprd = txctl->paprd;
+ if (txctl->paprd)
+ bf->bf_state.bfs_paprd_timestamp = jiffies;
bf->bf_flags = setup_tx_flags(skb, use_ldpc);
bf->bf_keytype = get_hw_crypto_keytype(skb);
@@ -1742,6 +1727,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
bf->bf_buf_addr,
txctl->txq->axq_qnum);
+ if (bf->bf_state.bfs_paprd)
+ ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
+
spin_lock_bh(&txctl->txq->axq_lock);
if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
@@ -1785,7 +1773,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_txq *txq = txctl->txq;
struct ath_buf *bf;
- int r;
+ int q, r;
bf = ath_tx_get_buffer(sc);
if (!bf) {
@@ -1793,14 +1781,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
return -1;
}
- bf->txq = txctl->txq;
- spin_lock_bh(&bf->txq->axq_lock);
- if (++bf->txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
- ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
- txq->stopped = 1;
- }
- spin_unlock_bh(&bf->txq->axq_lock);
-
r = ath_tx_setup_buffer(hw, bf, skb, txctl);
if (unlikely(r)) {
ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
@@ -1821,6 +1801,17 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
return r;
}
+ q = skb_get_queue_mapping(skb);
+ if (q >= 4)
+ q = 0;
+
+ spin_lock_bh(&txq->axq_lock);
+ if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
+ ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+ txq->stopped = 1;
+ }
+ spin_unlock_bh(&txq->axq_lock);
+
ath_tx_start_dma(sc, bf, txctl);
return 0;
@@ -1890,7 +1881,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
- int padpos, padsize;
+ int q, padpos, padsize;
ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
@@ -1929,8 +1920,16 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
ath9k_tx_status(hw, skb);
- else
+ else {
+ q = skb_get_queue_mapping(skb);
+ if (q >= 4)
+ q = 0;
+
+ if (--sc->tx.pending_frames[q] < 0)
+ sc->tx.pending_frames[q] = 0;
+
ieee80211_tx_status(hw, skb);
+ }
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1951,16 +1950,19 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
tx_flags |= ATH_TX_XRETRY;
}
- if (bf->txq) {
- spin_lock_bh(&bf->txq->axq_lock);
- bf->txq->pending_frames--;
- spin_unlock_bh(&bf->txq->axq_lock);
- bf->txq = NULL;
- }
-
dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
- ath_tx_complete(sc, skb, bf->aphy, tx_flags);
- ath_debug_stat_tx(sc, txq, bf, ts);
+
+ if (bf->bf_state.bfs_paprd) {
+ if (time_after(jiffies,
+ bf->bf_state.bfs_paprd_timestamp +
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
+ dev_kfree_skb_any(skb);
+ else
+ complete(&sc->paprd_complete);
+ } else {
+ ath_tx_complete(sc, skb, bf->aphy, tx_flags);
+ ath_debug_stat_tx(sc, txq, bf, ts);
+ }
/*
* Return the list of ath_buf of this mpdu to free queue
@@ -2038,20 +2040,21 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
tx_info->status.rates[i].idx = -1;
}
- tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
+ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
int qnum;
+ qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
+ if (qnum == -1)
+ return;
+
spin_lock_bh(&txq->axq_lock);
- if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
- qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
- if (qnum != -1) {
- ath_mac80211_start_queue(sc, qnum);
- txq->stopped = 0;
- }
+ if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
+ ath_mac80211_start_queue(sc, qnum);
+ txq->stopped = 0;
}
spin_unlock_bh(&txq->axq_lock);
}
@@ -2148,7 +2151,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* This frame is sent out as a single frame.
* Use hardware retry status for this frame.
*/
- bf->bf_retries = ts.ts_longretry;
if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
ath_tx_rc_status(bf, &ts, 0, txok, true);
@@ -2278,7 +2280,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
}
if (!bf_isampdu(bf)) {
- bf->bf_retries = txs.ts_longretry;
if (txs.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
ath_tx_rc_status(bf, &txs, 0, txok, true);
@@ -2422,26 +2423,8 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
for (acno = 0, ac = &an->ac[acno];
acno < WME_NUM_AC; acno++, ac++) {
ac->sched = false;
+ ac->qnum = sc->tx.hwq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
-
- switch (acno) {
- case WME_AC_BE:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- break;
- case WME_AC_BK:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
- break;
- case WME_AC_VI:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
- break;
- case WME_AC_VO:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
- break;
- }
}
}
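[Editorial aside: the xmit.c hunks above replace the per-hardware-queue pending_frames counter with a per-mac80211-queue array indexed by skb_get_queue_mapping() (clamped to the four WME ACs). A queue is stopped once too many frames are outstanding and woken again from ath_wake_mac80211_queue() as completions drain the counter. A compact sketch of that bookkeeping with illustrative names, plain C types, a stand-in depth limit, and no locking shown:]

#include <stdbool.h>

#define NUM_QUEUES	4
#define MAX_QDEPTH	123	/* stand-in value, not the driver's ATH_MAX_QDEPTH */

static int  pending[NUM_QUEUES];
static bool stopped[NUM_QUEUES];

static void on_tx_start(int q)
{
	if (q >= NUM_QUEUES)
		q = 0;			/* out-of-range mappings fall back to queue 0 */
	if (++pending[q] > MAX_QDEPTH && !stopped[q])
		stopped[q] = true;	/* ask the stack to stop feeding this queue */
}

static void on_tx_complete(int q)
{
	if (q >= NUM_QUEUES)
		q = 0;
	if (--pending[q] < 0)
		pending[q] = 0;		/* never let the counter go negative */
	if (stopped[q] && pending[q] < MAX_QDEPTH)
		stopped[q] = false;	/* queue may be woken again */
}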
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7965b70..8e24379 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1804,7 +1804,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
b43err(dev->wl, "This device does not support DMA "
- "on your system. Please use PIO instead.\n");
+ "on your system. It will now be switched to PIO.\n");
/* Fall back to PIO transfers if we get fatal DMA errors! */
dev->use_pio = 1;
b43_controller_restart(dev, "DMA error");
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 4e56b7b..45933cf 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -182,6 +182,7 @@ static void b43_sdio_remove(struct sdio_func *func)
static const struct sdio_device_id b43_sdio_ids[] = {
{ SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */
+ { SDIO_DEVICE(0x0092, 0x0004) }, /* C-guys, Inc. EW-CG1102GC */
{ },
};
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 231dbd7..9cadaa2 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -688,7 +688,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
struct ap_data *ap = data;
struct net_device *dev = ap->local->dev;
struct ieee80211_hdr *hdr;
- u16 fc, status;
+ u16 status;
__le16 *pos;
struct sta_info *sta = NULL;
char *txt = NULL;
@@ -699,7 +699,6 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
}
hdr = (struct ieee80211_hdr *) skb->data;
- fc = le16_to_cpu(hdr->frame_control);
if ((!ieee80211_is_assoc_resp(hdr->frame_control) &&
!ieee80211_is_reassoc_resp(hdr->frame_control)) ||
skb->len < IEEE80211_MGMT_HDR_LEN + 4) {
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index db72461..29b31a6 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -594,6 +594,7 @@ static int prism2_config(struct pcmcia_device *link)
local_info_t *local;
int ret = 1;
struct hostap_cs_priv *hw_priv;
+ unsigned long flags;
PDEBUG(DEBUG_FLOW, "prism2_config()\n");
@@ -625,9 +626,15 @@ static int prism2_config(struct pcmcia_device *link)
local->hw_priv = hw_priv;
hw_priv->link = link;
+ /*
+ * Make sure the IRQ handler cannot proceed until at least
+ * dev->base_addr is initialized.
+ */
+ spin_lock_irqsave(&local->irq_init_lock, flags);
+
ret = pcmcia_request_irq(link, prism2_interrupt);
if (ret)
- goto failed;
+ goto failed_unlock;
/*
* This actually configures the PCMCIA socket -- setting up
@@ -636,11 +643,13 @@ static int prism2_config(struct pcmcia_device *link)
*/
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
- goto failed;
+ goto failed_unlock;
dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
+ spin_unlock_irqrestore(&local->irq_init_lock, flags);
+
/* Finally, report what we've done */
printk(KERN_INFO "%s: index 0x%02x: ",
dev_info, link->conf.ConfigIndex);
@@ -667,6 +676,8 @@ static int prism2_config(struct pcmcia_device *link)
return ret;
+ failed_unlock:
+ spin_unlock_irqrestore(&local->irq_init_lock, flags);
failed:
kfree(hw_priv);
prism2_release((u_long)link);
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ff9b5c8..2f999fc 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2621,6 +2621,18 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
iface = netdev_priv(dev);
local = iface->local;
+ /* Detect early interrupt before driver is fully configured */
+ spin_lock(&local->irq_init_lock);
+ if (!dev->base_addr) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
+ dev->name);
+ }
+ spin_unlock(&local->irq_init_lock);
+ return IRQ_HANDLED;
+ }
+ spin_unlock(&local->irq_init_lock);
+
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
if (local->func->card_present && !local->func->card_present(local)) {
@@ -3138,6 +3150,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
spin_lock_init(&local->cmdlock);
spin_lock_init(&local->baplock);
spin_lock_init(&local->lock);
+ spin_lock_init(&local->irq_init_lock);
mutex_init(&local->rid_bap_mtx);
if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index eb57d1e..eaee84b 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -741,9 +741,7 @@ void hostap_set_multicast_list_queue(struct work_struct *work)
local_info_t *local =
container_of(work, local_info_t, set_multicast_list_queue);
struct net_device *dev = local->dev;
- struct hostap_interface *iface;
- iface = netdev_priv(dev);
if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
local->is_promisc)) {
printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index c02f866..1c66b3c 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -654,7 +654,7 @@ struct local_info {
rwlock_t iface_lock; /* hostap_interfaces read lock; use write lock
* when removing entries from the list.
* TX and RX paths can use read lock. */
- spinlock_t cmdlock, baplock, lock;
+ spinlock_t cmdlock, baplock, lock, irq_init_lock;
struct mutex rid_bap_mtx;
u16 infofid; /* MAC buffer id for info frame */
/* txfid, intransmitfid, next_txtid, and next_alloc are protected by
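[Editorial aside: the hostap changes above close a small race. The interrupt can fire as soon as pcmcia_request_irq() succeeds, but dev->base_addr is only set a few lines later, so the handler now takes irq_init_lock and bails out while the device is still unconfigured. A self-contained sketch of the pattern using a POSIX mutex in place of the kernel spinlock; names and the base address value are illustrative only:]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long base_addr;		/* 0 until configuration finishes */

/* Stand-in for the interrupt handler: ignore interrupts that arrive
 * before the device has an I/O base address. */
static void irq_handler(void)
{
	pthread_mutex_lock(&init_lock);
	if (!base_addr) {
		pthread_mutex_unlock(&init_lock);
		fprintf(stderr, "interrupt before configuration, ignored\n");
		return;
	}
	pthread_mutex_unlock(&init_lock);
	/* ... normal interrupt processing ... */
}

/* Stand-in for prism2_config(): hold the lock across IRQ registration and
 * the assignment of base_addr so the handler never sees a half-set-up
 * device. */
static void configure(void)
{
	pthread_mutex_lock(&init_lock);
	/* request_irq() analogue would happen here */
	base_addr = 0x340;		/* hypothetical I/O base */
	pthread_mutex_unlock(&init_lock);
}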
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8feaa1d..cb2552a 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -96,7 +96,7 @@ static int network_mode = 0;
static u32 ipw_debug_level;
static int associate;
static int auto_create = 1;
-static int led_support = 0;
+static int led_support = 1;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
@@ -12082,7 +12082,7 @@ module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
module_param_named(led, led_support, int, 0444);
-MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
+MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 7c72353..728bb85 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_IWLWIFI) += iwlcore.o
iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
+iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
iwlcore-objs += iwl-scan.o iwl-led.o
iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
@@ -11,7 +11,7 @@ CFLAGS_iwl-devtrace.o := -I$(src)
obj-$(CONFIG_IWLAGN) += iwlagn.o
iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
-iwlagn-objs += iwl-agn-lib.o
+iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index dba91e0..1daf159 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -157,6 +157,8 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
+ if (priv->cfg->need_dc_calib)
+ priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
@@ -215,6 +217,7 @@ static struct iwl_lib_ops iwl1000_lib = {
.set_ct_kill = iwl1000_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
+ .update_bcast_station = iwl_update_bcast_station,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 0fa1d51..a07310f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -406,6 +406,11 @@ static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
unsigned int plcp_msec;
unsigned long plcp_received_jiffies;
+ if (priv->cfg->plcp_delta_threshold ==
+ IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+ IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+ return rc;
+ }
memcpy(&current_stat, pkt->u.raw, sizeof(struct
iwl3945_notif_statistics));
/*
@@ -844,7 +849,7 @@ static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
- iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
+ iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 83e6a42..1dd3bc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1580,7 +1580,8 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
u32 R4;
if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
- (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
+ (priv->_agn.statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
@@ -1604,8 +1605,8 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
if (!test_bit(STATUS_TEMPERATURE, &priv->status))
vt = sign_extend(R4, 23);
else
- vt = sign_extend(
- le32_to_cpu(priv->statistics.general.temperature), 23);
+ vt = sign_extend(le32_to_cpu(
+ priv->_agn.statistics.general.temperature), 23);
IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
@@ -1785,6 +1786,7 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
{
unsigned long flags;
u16 ra_tid;
+ int ret;
if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
(IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
@@ -1800,7 +1802,9 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
ra_tid = BUILD_RAxTID(sta_id, tid);
/* Modify device's station table to Tx this TID */
- iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+ ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+ if (ret)
+ return ret;
spin_lock_irqsave(&priv->lock, flags);
@@ -2276,6 +2280,7 @@ static struct iwl_lib_ops iwl4965_lib = {
.set_ct_kill = iwl4965_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
+ .update_bcast_station = iwl_update_bcast_station,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 32710a8..b8f3e20 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -249,10 +249,11 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial calibration set */
priv->hw_params.sens = &iwl5150_sensitivity;
priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_DC) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
+ if (priv->cfg->need_dc_calib)
+ priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
@@ -264,7 +265,7 @@ static void iwl5150_temperature(struct iwl_priv *priv)
u32 vt = 0;
s32 offset = iwl_temp_calib_to_offset(priv);
- vt = le32_to_cpu(priv->statistics.general.temperature);
+ vt = le32_to_cpu(priv->_agn.statistics.general.temperature);
vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
/* now vt hold the temperature in Kelvin */
priv->temperature = KELVIN_TO_CELSIUS(vt);
@@ -392,6 +393,7 @@ static struct iwl_lib_ops iwl5000_lib = {
.set_ct_kill = iwl5000_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
+ .update_bcast_station = iwl_update_bcast_station,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -454,6 +456,7 @@ static struct iwl_lib_ops iwl5150_lib = {
.set_ct_kill = iwl5150_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
+ .update_bcast_station = iwl_update_bcast_station,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -660,6 +663,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl5150_abg_cfg = {
@@ -689,6 +693,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index afdeec5..8577664 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -84,9 +84,10 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
}
/* Indicate calibration version to uCode. */
-static void iwl6050_set_calib_version(struct iwl_priv *priv)
+static void iwl6000_set_calib_version(struct iwl_priv *priv)
{
- if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
+ if (priv->cfg->need_dc_calib &&
+ (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6))
iwl_set_bit(priv, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
@@ -186,53 +187,8 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
-
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
-static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
-{
- if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->num_of_queues =
- priv->cfg->mod_params->num_of_queues;
-
- priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
- priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWL5000_STATION_COUNT;
- priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
-
- priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
-
- priv->hw_params.max_bsm_size = 0;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
- if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
- priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
-
- /* Set initial sensitivity parameters */
- /* Set initial calibration set */
- priv->hw_params.sens = &iwl6000_sensitivity;
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_DC) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
+ if (priv->cfg->need_dc_calib)
+ priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC);
priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
@@ -359,8 +315,10 @@ static struct iwl_lib_ops iwl6000_lib = {
.temp_ops = {
.temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
+ .set_calib_version = iwl6000_set_calib_version,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
+ .update_bcast_station = iwl_update_bcast_station,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -397,79 +355,6 @@ static const struct iwl_ops iwl6000g2b_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_lib_ops iwl6050_lib = {
- .set_hw_params = iwl6050_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwlagn_txq_set_sched,
- .txq_agg_enable = iwlagn_txq_agg_enable,
- .txq_agg_disable = iwlagn_txq_agg_disable,
- .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl_hw_txq_free_tfd,
- .txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwlagn_rx_handler_setup,
- .setup_deferred_work = iwlagn_setup_deferred_work,
- .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
- .load_ucode = iwlagn_load_ucode,
- .dump_nic_event_log = iwl_dump_nic_event_log,
- .dump_nic_error_log = iwl_dump_nic_error_log,
- .dump_csr = iwl_dump_csr,
- .dump_fh = iwl_dump_fh,
- .init_alive_start = iwlagn_init_alive_start,
- .alive_notify = iwlagn_alive_notify,
- .send_tx_power = iwlagn_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
- .set_channel_switch = iwl6000_hw_channel_switch,
- .apm_ops = {
- .init = iwl_apm_init,
- .stop = iwl_apm_stop,
- .config = iwl6000_nic_config,
- .set_pwr_src = iwl_set_pwr_src,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- .verify_signature = iwlcore_eeprom_verify_signature,
- .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
- .release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwlagn_eeprom_calib_version,
- .query_addr = iwlagn_eeprom_query_addr,
- .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
- },
- .post_associate = iwl_post_associate,
- .isr = iwl_isr_ict,
- .config_ap = iwl_config_ap,
- .temp_ops = {
- .temperature = iwlagn_temperature,
- .set_ct_kill = iwl6000_set_ct_threshold,
- .set_calib_version = iwl6050_set_calib_version,
- },
- .manage_ibss_station = iwlagn_manage_ibss_station,
- .debugfs_ops = {
- .rx_stats_read = iwl_ucode_rx_stats_read,
- .tx_stats_read = iwl_ucode_tx_stats_read,
- .general_stats_read = iwl_ucode_general_stats_read,
- },
- .recover_from_tx_stall = iwl_bg_monitor_recover,
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
-};
-
-static const struct iwl_ops iwl6050_ops = {
- .lib = &iwl6050_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
- .led = &iwlagn_led_ops,
-};
-
-
struct iwl_cfg iwl6000g2a_2agn_cfg = {
.name = "6000 Series 2x2 AGN Gen2a",
.fw_name_pre = IWL6000G2A_FW_PRE,
@@ -505,6 +390,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000g2a_2abg_cfg = {
@@ -537,6 +423,9 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000g2a_2bg_cfg = {
@@ -569,6 +458,9 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000g2b_2agn_cfg = {
@@ -603,6 +495,9 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000g2b_2abg_cfg = {
@@ -635,6 +530,9 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000g2b_2bgn_cfg = {
@@ -669,6 +567,9 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000g2b_2bg_cfg = {
@@ -701,6 +602,9 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000g2b_bgn_cfg = {
@@ -735,6 +639,9 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000g2b_bg_cfg = {
@@ -767,6 +674,9 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.max_event_log_size = 512,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
/*
@@ -885,7 +795,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .ops = &iwl6050_ops,
+ .ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
@@ -914,6 +824,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6050_2abg_cfg = {
@@ -922,7 +833,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
- .ops = &iwl6050_ops,
+ .ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
@@ -949,6 +860,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
+ .need_dc_calib = true,
};
struct iwl_cfg iwl6000_3agn_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 7e8227773..eb052b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -96,17 +96,16 @@ int iwl_send_calib_results(struct iwl_priv *priv)
hcmd.len = priv->calib_results[i].buf_len;
hcmd.data = priv->calib_results[i].buf;
ret = iwl_send_cmd_sync(priv, &hcmd);
- if (ret)
- goto err;
+ if (ret) {
+ IWL_ERR(priv, "Error %d iteration %d\n",
+ ret, i);
+ break;
+ }
}
}
- return 0;
-err:
- IWL_ERR(priv, "Error %d iteration %d\n", ret, i);
return ret;
}
-EXPORT_SYMBOL(iwl_send_calib_results);
int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
{
@@ -121,7 +120,6 @@ int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
memcpy(res->buf, buf, len);
return 0;
}
-EXPORT_SYMBOL(iwl_calib_set);
void iwl_calib_free_results(struct iwl_priv *priv)
{
@@ -133,7 +131,6 @@ void iwl_calib_free_results(struct iwl_priv *priv)
priv->calib_results[i].buf_len = 0;
}
}
-EXPORT_SYMBOL(iwl_calib_free_results);
/*****************************************************************************
* RUNTIME calibrations framework
@@ -533,7 +530,6 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
ret |= iwl_sensitivity_write(priv);
IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
}
-EXPORT_SYMBOL(iwl_init_sensitivity);
void iwl_sensitivity_calibration(struct iwl_priv *priv,
struct iwl_notif_statistics *resp)
@@ -639,7 +635,6 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
iwl_sensitivity_write(priv);
}
-EXPORT_SYMBOL(iwl_sensitivity_calibration);
static inline u8 find_first_chain(u8 mask)
{
@@ -846,6 +841,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
}
}
+ if (active_chains != priv->hw_params.valid_rx_ant &&
+ active_chains != priv->chain_noise_data.active_chains)
+ IWL_DEBUG_CALIB(priv,
+ "Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n",
+ active_chains, priv->hw_params.valid_rx_ant);
+
/* Save for use within RXON, TX, SCAN commands, etc. */
priv->chain_noise_data.active_chains = active_chains;
IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
@@ -890,8 +892,6 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
data->state = IWL_CHAIN_NOISE_DONE;
iwl_power_update_mode(priv, false);
}
-EXPORT_SYMBOL(iwl_chain_noise_calibration);
-
void iwl_reset_run_time_calib(struct iwl_priv *priv)
{
@@ -908,5 +908,3 @@ void iwl_reset_run_time_calib(struct iwl_priv *priv)
* periodically after association */
iwl_send_statistics_request(priv, CMD_ASYNC, true);
}
-EXPORT_SYMBOL(iwl_reset_run_time_calib);
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index 3d08dc8..75d6bfc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -33,17 +33,17 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
int p = 0;
p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->statistics.flag));
- if (le32_to_cpu(priv->statistics.flag) &
+ le32_to_cpu(priv->_agn.statistics.flag));
+ if (le32_to_cpu(priv->_agn.statistics.flag) &
UCODE_STATISTICS_CLEAR_MSK)
p += scnprintf(buf + p, bufsz - p,
"\tStatistics have been cleared\n");
p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
+ (le32_to_cpu(priv->_agn.statistics.flag) &
UCODE_STATISTICS_FREQUENCY_MSK)
? "2.4 GHz" : "5.2 GHz");
p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
+ (le32_to_cpu(priv->_agn.statistics.flag) &
UCODE_STATISTICS_NARROW_BAND_MSK)
? "enabled" : "disabled");
return p;
@@ -79,22 +79,22 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- ofdm = &priv->statistics.rx.ofdm;
- cck = &priv->statistics.rx.cck;
- general = &priv->statistics.rx.general;
- ht = &priv->statistics.rx.ofdm_ht;
- accum_ofdm = &priv->accum_statistics.rx.ofdm;
- accum_cck = &priv->accum_statistics.rx.cck;
- accum_general = &priv->accum_statistics.rx.general;
- accum_ht = &priv->accum_statistics.rx.ofdm_ht;
- delta_ofdm = &priv->delta_statistics.rx.ofdm;
- delta_cck = &priv->delta_statistics.rx.cck;
- delta_general = &priv->delta_statistics.rx.general;
- delta_ht = &priv->delta_statistics.rx.ofdm_ht;
- max_ofdm = &priv->max_delta.rx.ofdm;
- max_cck = &priv->max_delta.rx.cck;
- max_general = &priv->max_delta.rx.general;
- max_ht = &priv->max_delta.rx.ofdm_ht;
+ ofdm = &priv->_agn.statistics.rx.ofdm;
+ cck = &priv->_agn.statistics.rx.cck;
+ general = &priv->_agn.statistics.rx.general;
+ ht = &priv->_agn.statistics.rx.ofdm_ht;
+ accum_ofdm = &priv->_agn.accum_statistics.rx.ofdm;
+ accum_cck = &priv->_agn.accum_statistics.rx.cck;
+ accum_general = &priv->_agn.accum_statistics.rx.general;
+ accum_ht = &priv->_agn.accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->_agn.delta_statistics.rx.ofdm;
+ delta_cck = &priv->_agn.delta_statistics.rx.cck;
+ delta_general = &priv->_agn.delta_statistics.rx.general;
+ delta_ht = &priv->_agn.delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->_agn.max_delta.rx.ofdm;
+ max_cck = &priv->_agn.max_delta.rx.cck;
+ max_general = &priv->_agn.max_delta.rx.general;
+ max_ht = &priv->_agn.max_delta.rx.ofdm_ht;
pos += iwl_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
@@ -560,10 +560,10 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- tx = &priv->statistics.tx;
- accum_tx = &priv->accum_statistics.tx;
- delta_tx = &priv->delta_statistics.tx;
- max_tx = &priv->max_delta.tx;
+ tx = &priv->_agn.statistics.tx;
+ accum_tx = &priv->_agn.accum_statistics.tx;
+ delta_tx = &priv->_agn.delta_statistics.tx;
+ max_tx = &priv->_agn.max_delta.tx;
pos += iwl_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"acumulative delta max\n",
@@ -777,18 +777,18 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- general = &priv->statistics.general;
- dbg = &priv->statistics.general.dbg;
- div = &priv->statistics.general.div;
- accum_general = &priv->accum_statistics.general;
- delta_general = &priv->delta_statistics.general;
- max_general = &priv->max_delta.general;
- accum_dbg = &priv->accum_statistics.general.dbg;
- delta_dbg = &priv->delta_statistics.general.dbg;
- max_dbg = &priv->max_delta.general.dbg;
- accum_div = &priv->accum_statistics.general.div;
- delta_div = &priv->delta_statistics.general.div;
- max_div = &priv->max_delta.general.div;
+ general = &priv->_agn.statistics.general;
+ dbg = &priv->_agn.statistics.general.dbg;
+ div = &priv->_agn.statistics.general.div;
+ accum_general = &priv->_agn.accum_statistics.general;
+ delta_general = &priv->_agn.delta_statistics.general;
+ max_general = &priv->_agn.max_delta.general;
+ accum_dbg = &priv->_agn.accum_statistics.general.dbg;
+ delta_dbg = &priv->_agn.delta_statistics.general.dbg;
+ max_dbg = &priv->_agn.max_delta.general.dbg;
+ accum_div = &priv->_agn.accum_statistics.general.div;
+ delta_div = &priv->_agn.delta_statistics.general.div;
+ max_div = &priv->_agn.max_delta.general.div;
pos += iwl_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"acumulative delta max\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 3f765ba..f06d1fe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -214,11 +214,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
__le32 *tx_flags)
{
- if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
- else
- *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}
/* Calc max signal level (dBm) among 3 possible receivers */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 548f51d..5f1e7d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -361,7 +361,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
void iwlagn_temperature(struct iwl_priv *priv)
{
/* store temperature from statistics (in Celsius) */
- priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
+ priv->temperature =
+ le32_to_cpu(priv->_agn.statistics.general.temperature);
iwl_tt_handler(priv);
}
@@ -486,7 +487,7 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
/* Tell device where to find RBD circular buffer in DRAM */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->dma_addr >> 8));
+ (u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
@@ -751,7 +752,7 @@ void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
}
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ rxq->bd_dma);
dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
@@ -904,7 +905,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
- struct iwl4965_rx_mpdu_res_start *amsdu;
+ struct iwl_rx_mpdu_res_start *amsdu;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
@@ -933,7 +934,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
return;
}
phy_res = &priv->_agn.last_phy_res;
- amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 40933a5..35c86d2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -324,18 +324,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
- if ((tid < TID_MAX_LOAD_COUNT) &&
- !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
- if (priv->cfg->use_rts_for_ht) {
- /*
- * switch to RTS/CTS if it is the prefer protection
- * method for HT traffic
- */
- IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
- priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
- iwlcore_commit_rxon(priv);
- }
- }
+ if (tid < TID_MAX_LOAD_COUNT)
+ rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
+ else
+ IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
+ tid, TID_MAX_LOAD_COUNT);
}
static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
new file mode 100644
index 0000000..d54edc3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -0,0 +1,284 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-calib.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+
+void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_beacon_notif *missed_beacon;
+
+ missed_beacon = &pkt->u.missed_beacon;
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ priv->missed_beacon_threshold) {
+ IWL_DEBUG_CALIB(priv,
+ "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+ le32_to_cpu(missed_beacon->total_missed_becons),
+ le32_to_cpu(missed_beacon->num_recvd_beacons),
+ le32_to_cpu(missed_beacon->num_expected_beacons));
+ if (!test_bit(STATUS_SCANNING, &priv->status))
+ iwl_init_sensitivity(priv);
+ }
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ * before arriving beacon. This measurement can be done only if we know
+ * exactly when to expect beacons, therefore only when we're associated. */
+static void iwl_rx_calc_noise(struct iwl_priv *priv)
+{
+ struct statistics_rx_non_phy *rx_info
+ = &(priv->_agn.statistics.rx.general);
+ int num_active_rx = 0;
+ int total_silence = 0;
+ int bcn_silence_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ int bcn_silence_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ int bcn_silence_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+ int last_rx_noise;
+
+ if (bcn_silence_a) {
+ total_silence += bcn_silence_a;
+ num_active_rx++;
+ }
+ if (bcn_silence_b) {
+ total_silence += bcn_silence_b;
+ num_active_rx++;
+ }
+ if (bcn_silence_c) {
+ total_silence += bcn_silence_c;
+ num_active_rx++;
+ }
+
+ /* Average among active antennas */
+ if (num_active_rx)
+ last_rx_noise = (total_silence / num_active_rx) - 107;
+ else
+ last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+ IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+ bcn_silence_a, bcn_silence_b, bcn_silence_c,
+ last_rx_noise);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/*
+ * based on the assumption that all statistics counters are in DWORDs
+ * FIXME: this function is for debugging only and does not handle
+ * the case of counter roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+ __le32 *stats)
+{
+ int i;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+
+ prev_stats = (__le32 *)&priv->_agn.statistics;
+ accum_stats = (u32 *)&priv->_agn.accum_statistics;
+ delta = (u32 *)&priv->_agn.delta_statistics;
+ max_delta = (u32 *)&priv->_agn.max_delta;
+
+ for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
+ le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative statistics for "no-counter" type statistics */
+ priv->_agn.accum_statistics.general.temperature =
+ priv->_agn.statistics.general.temperature;
+ priv->_agn.accum_statistics.general.temperature_m =
+ priv->_agn.statistics.general.temperature_m;
+ priv->_agn.accum_statistics.general.ttl_timestamp =
+ priv->_agn.statistics.general.ttl_timestamp;
+ priv->_agn.accum_statistics.tx.tx_power.ant_a =
+ priv->_agn.statistics.tx.tx_power.ant_a;
+ priv->_agn.accum_statistics.tx.tx_power.ant_b =
+ priv->_agn.statistics.tx.tx_power.ant_b;
+ priv->_agn.accum_statistics.tx.tx_power.ant_c =
+ priv->_agn.statistics.tx.tx_power.ant_c;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * iwl_good_plcp_health - checks for plcp error.
+ *
+ * When the plcp error is exceeding the thresholds, reset the radio
+ * to improve the throughput.
+ */
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ bool rc = true;
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
+
+ if (priv->cfg->plcp_delta_threshold ==
+ IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+ IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+ return rc;
+ }
+
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ combined_plcp_delta =
+ (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
+ le32_to_cpu(priv->_agn.statistics.rx.ofdm.plcp_err)) +
+ (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
+ le32_to_cpu(priv->_agn.statistics.rx.ofdm_ht.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+ * if plcp_err exceed the threshold,
+ * the following data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * Received ofdm_ht.plcp_err,
+ * Current ofdm_ht.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+ "%u, %u, %u, %u, %d, %u mSecs\n",
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
+ le32_to_cpu(
+ priv->_agn.statistics.rx.ofdm.plcp_err),
+ le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
+ le32_to_cpu(
+ priv->_agn.statistics.rx.ofdm_ht.plcp_err),
+ combined_plcp_delta, plcp_msec);
+ rc = false;
+ }
+ }
+ return rc;
+}
+
+void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ int change;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+
+ IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(priv->_agn.statistics),
+ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->_agn.statistics.general.temperature !=
+ pkt->u.stats.general.temperature) ||
+ ((priv->_agn.statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+ iwl_recover_from_statistics(priv, pkt);
+
+ memcpy(&priv->_agn.statistics, &pkt->u.stats,
+ sizeof(priv->_agn.statistics));
+
+ set_bit(STATUS_STATISTICS, &priv->status);
+
+ /* Reschedule the statistics timer to occur in
+ * REG_RECALIB_PERIOD seconds to ensure we get a
+ * thermal update even if the uCode doesn't give
+ * us one */
+ mod_timer(&priv->statistics_periodic, jiffies +
+ msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+ if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+ (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+ iwl_rx_calc_noise(priv);
+ queue_work(priv->workqueue, &priv->run_time_calib_work);
+ }
+ if (priv->cfg->ops->lib->temp_ops.temperature && change)
+ priv->cfg->ops->lib->temp_ops.temperature(priv);
+}
+
+void iwl_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ memset(&priv->_agn.accum_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->_agn.delta_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->_agn.max_delta, 0,
+ sizeof(struct iwl_notif_statistics));
+#endif
+ IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+ }
+ iwl_rx_statistics(priv, rxb);
+}
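[Editorial aside: the PLCP health check in the new iwl-agn-rx.c above adds the OFDM and OFDM-HT PLCP error deltas since the previous statistics notification, scales the sum by 100, divides by the elapsed milliseconds, and declares the radio unhealthy when the result exceeds the configured threshold. A minimal sketch of that rate test with illustrative names and plain C types:]

#include <stdbool.h>
#include <stdint.h>

static bool plcp_health_ok(uint32_t delta_plcp_errors,
			   uint32_t elapsed_msec,
			   uint32_t threshold)
{
	if (!elapsed_msec)
		return true;	/* no elapsed time: avoid dividing by zero */
	return ((delta_plcp_errors * 100) / elapsed_msec) <= threshold;
}

/* Example: 60 new PLCP errors over 3000 ms gives a scaled rate of 2, so a
 * threshold of 1 would fail the check (triggering a radio reset) while a
 * threshold of 50 would pass it. */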
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index f9134ce..2573234e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -233,6 +233,7 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
{
unsigned long flags;
u16 ra_tid;
+ int ret;
if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
(IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
@@ -248,7 +249,9 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
ra_tid = BUILD_RAxTID(sta_id, tid);
/* Modify device's station table to Tx this TID */
- iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+ ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+ if (ret)
+ return ret;
spin_lock_irqsave(&priv->lock, flags);
@@ -1324,6 +1327,11 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
sta_id = ba_resp->sta_id;
tid = ba_resp->tid;
agg = &priv->stations[sta_id].tid[tid].agg;
+ if (unlikely(agg->txq_id != scd_flow)) {
+ IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n",
+ scd_flow, agg->txq_id);
+ return;
+ }
/* Find index just before block-ack window */
index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 69e17d7..3368cfd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -941,6 +941,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
fill_rx = 1;
while (i != r) {
+ int len;
+
rxb = rxq->queue[i];
/* If an RXB doesn't have a Rx queue slot associated with it,
@@ -955,8 +957,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
PCI_DMA_FROMDEVICE);
pkt = rxb_addr(rxb);
- trace_iwlwifi_dev_rx(priv, pkt,
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(priv, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -1458,13 +1461,13 @@ bool iwl_good_ack_health(struct iwl_priv *priv,
actual_ack_cnt_delta =
le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
- le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
+ le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
expected_ack_cnt_delta =
le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
- le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
+ le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
ba_timeout_delta =
le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
- le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
+ le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
if ((priv->_agn.agg_tids_count > 0) &&
(expected_ack_cnt_delta > 0) &&
(((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
@@ -1481,10 +1484,10 @@ bool iwl_good_ack_health(struct iwl_priv *priv,
* DEBUG is not, these will just compile out.
*/
IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
- priv->delta_statistics.tx.rx_detected_cnt);
+ priv->_agn.delta_statistics.tx.rx_detected_cnt);
IWL_DEBUG_RADIO(priv,
"ack_or_ba_timeout_collision delta = %d\n",
- priv->delta_statistics.tx.
+ priv->_agn.delta_statistics.tx.
ack_or_ba_timeout_collision);
#endif
IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
@@ -2307,9 +2310,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
blink1, blink2, ilink1, ilink2);
- IWL_ERR(priv, "Desc Time "
+ IWL_ERR(priv, "Desc Time "
"data1 data2 line\n");
- IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
+ IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
desc_lookup(desc), desc, time, data1, data2, line);
IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
@@ -2932,9 +2935,9 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
}
if (priv->start_calib) {
- iwl_chain_noise_calibration(priv, &priv->statistics);
+ iwl_chain_noise_calibration(priv, &priv->_agn.statistics);
- iwl_sensitivity_calibration(priv, &priv->statistics);
+ iwl_sensitivity_calibration(priv, &priv->_agn.statistics);
}
mutex_unlock(&priv->mutex);
@@ -3365,13 +3368,32 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
+/*
+ * switch to RTS/CTS for TX
+ */
+static void iwl_enable_rts_cts(struct iwl_priv *priv)
+{
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_INFO(priv, "use RTS/CTS protection\n");
+ iwlcore_commit_rxon(priv);
+ } else {
+ /* scanning, defer the request until scan completed */
+ IWL_DEBUG_INFO(priv, "defer setting RTS/CTS protection\n");
+ }
+}
+
static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
struct iwl_priv *priv = hw->priv;
- int ret;
+ int ret = -EINVAL;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
@@ -3379,17 +3401,19 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
if (!(priv->cfg->sku & IWL_SKU_N))
return -EACCES;
+ mutex_lock(&priv->mutex);
+
switch (action) {
case IEEE80211_AMPDU_RX_START:
IWL_DEBUG_HT(priv, "start Rx\n");
- return iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+ ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+ break;
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return 0;
- else
- return ret;
+ ret = 0;
+ break;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
@@ -3398,7 +3422,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
priv->_agn.agg_tids_count);
}
- return ret;
+ break;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
@@ -3408,18 +3432,22 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
priv->_agn.agg_tids_count);
}
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return 0;
- else
- return ret;
+ ret = 0;
+ break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
- /* do nothing */
- return -EOPNOTSUPP;
- default:
- IWL_DEBUG_HT(priv, "unknown\n");
- return -EINVAL;
+ if (priv->cfg->use_rts_for_ht) {
+ /*
+ * switch to RTS/CTS if it is the preferred protection
+ * method for HT traffic
+ */
+ iwl_enable_rts_cts(priv);
+ }
+ ret = 0;
break;
}
- return 0;
+ mutex_unlock(&priv->mutex);
+
+ return ret;
}
static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
@@ -3462,10 +3490,12 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
int ret;
u8 sta_id;
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
sta->addr);
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+ sta->addr);
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
atomic_set(&sta_priv->pending_frames, 0);
if (vif->type == NL80211_IFTYPE_AP)
@@ -3477,6 +3507,7 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
sta->addr, ret);
/* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&priv->mutex);
return ret;
}
@@ -3486,6 +3517,7 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
sta->addr);
iwl_rs_rate_init(priv, sta, sta_id);
+ mutex_unlock(&priv->mutex);
return 0;
}
@@ -3638,6 +3670,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work(&priv->scan_check);
cancel_work_sync(&priv->start_internal_scan);
cancel_delayed_work(&priv->alive_start);
+ cancel_work_sync(&priv->run_time_calib_work);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
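The iwl_mac_ampdu_action() hunks above convert the per-case returns into a single exit path so that priv->mutex, now taken around the whole switch, is released on every path. A minimal sketch of that lock-once/single-return shape (illustrative only, not part of the patch):

	static int ampdu_action_sketch(struct iwl_priv *priv,
				       enum ieee80211_ampdu_mlme_action action)
	{
		int ret = -EINVAL;	/* default for unhandled actions */

		mutex_lock(&priv->mutex);
		switch (action) {
		case IEEE80211_AMPDU_RX_START:
			ret = 0;	/* the real handler is called here */
			break;
		default:
			break;		/* ret stays -EINVAL */
		}
		mutex_unlock(&priv->mutex);	/* single unlock for every path */

		return ret;
	}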
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5c32777..be9d298 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -201,6 +201,16 @@ static inline bool iwl_is_tx_success(u32 status)
(status == TX_STATUS_DIRECT_DONE);
}
+/* rx */
+void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
/* scan */
void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index c579965..acf8e98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1366,7 +1366,7 @@ struct iwl_rx_phy_res {
__le16 reserved3;
} __packed;
-struct iwl4965_rx_mpdu_res_start {
+struct iwl_rx_mpdu_res_start {
__le16 byte_count;
__le16 reserved;
} __packed;
@@ -1399,18 +1399,27 @@ struct iwl4965_rx_mpdu_res_start {
/* REPLY_TX Tx flags field */
-/* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
+/*
+ * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
* before this frame. if CTS-to-self required check
- * RXON_FLG_SELF_CTS_EN status. */
-#define TX_CMD_FLG_RTS_CTS_MSK cpu_to_le32(1 << 0)
+ * RXON_FLG_SELF_CTS_EN status.
+ * unused in 3945/4965, used in 5000 series and after
+ */
+#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
-/* 1: Use Request-To-Send protocol before this frame.
- * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */
+/*
+ * 1: Use Request-To-Send protocol before this frame.
+ * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
+ * used in 3945/4965, unused in 5000 series and after
+ */
#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
-/* 1: Transmit Clear-To-Send to self before this frame.
+/*
+ * 1: Transmit Clear-To-Send to self before this frame.
* Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
- * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */
+ * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
+ * used in 3945/4965, unused in 5000 series and after
+ */
#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
/* 1: Expect ACK from receiving station
@@ -1430,8 +1439,11 @@ struct iwl4965_rx_mpdu_res_start {
* Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
-/* 1: Frame requires full Tx-Op protection.
- * Set this if either RTS or CTS Tx Flag gets set. */
+/*
+ * 1: Frame requires full Tx-Op protection.
+ * Set this if either RTS or CTS Tx Flag gets set.
+ * used in 3945/4965, unused in 5000 series and after
+ */
#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
/* Tx antenna selection field; used only for 3945, reserved (0) for 4965.
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 62c50bc..a56fb46 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1331,7 +1331,6 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
changed_flags, *total_flags);
CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK);
CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -1346,6 +1345,12 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
@@ -2105,6 +2110,9 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
spin_unlock_irqrestore(&priv->lock, flags);
+ if (priv->cfg->ops->lib->update_bcast_station)
+ ret = priv->cfg->ops->lib->update_bcast_station(priv);
+
set_ch_out:
/* The list of supported rates and rate mask can be different
* for each band; since the band may have changed, reset
@@ -2837,6 +2845,7 @@ int iwl_pci_resume(struct pci_dev *pdev)
{
struct iwl_priv *priv = pci_get_drvdata(pdev);
int ret;
+ bool hw_rfkill = false;
/*
* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -2851,6 +2860,17 @@ int iwl_pci_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
iwl_enable_interrupts(priv);
+ if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
+
return 0;
}
EXPORT_SYMBOL(iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 76288c5..15930e0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -196,6 +196,7 @@ struct iwl_lib_ops {
/* station management */
int (*manage_ibss_station)(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add);
+ int (*update_bcast_station)(struct iwl_priv *priv);
/* recover from tx queue stall */
void (*recover_from_tx_stall)(unsigned long data);
/* check for plcp health */
@@ -330,6 +331,7 @@ struct iwl_cfg {
const bool chain_noise_calib_by_driver;
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+ const bool need_dc_calib;
};
/***************************
@@ -455,20 +457,10 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* Handlers */
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
-bool iwl_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-bool iwl_good_ack_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
void iwl_recover_from_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt);
-void iwl_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index d9f21bb..7d9ffc1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -1018,8 +1018,13 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
rxq->write);
pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
rxq->free_count);
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1425,10 +1430,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
return -EFAULT;
if (sscanf(buf, "%d", &plcp) != 1)
return -EINVAL;
- if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+ if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
priv->cfg->plcp_delta_threshold =
- IWL_MAX_PLCP_ERR_THRESHOLD_DEF;
+ IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
else
priv->cfg->plcp_delta_threshold = plcp;
return count;
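Together with the threshold constants changed in iwl-dev.h below, the plcp_delta write hunk above changes what an out-of-range value means: anything below IWL_MAX_PLCP_ERR_THRESHOLD_MIN (now 1) or above IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) no longer falls back to the default of 50 but disables the check via IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0). A minimal sketch of the resulting mapping (the helper name is illustrative, not part of the patch):

	static u8 plcp_threshold_from_user(int plcp)
	{
		if (plcp < 1 ||		/* IWL_MAX_PLCP_ERR_THRESHOLD_MIN */
		    plcp > 255)		/* IWL_MAX_PLCP_ERR_THRESHOLD_MAX */
			return 0;	/* ..._THRESHOLD_DISABLE: check off */
		return plcp;		/* 1..255 is taken as-is */
	}

For example, writing 0 or 300 to the debugfs file now switches the PLCP health check off, while an in-range value such as 100 is used directly.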
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index fc6072c..728752a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -348,7 +348,7 @@ struct iwl_host_cmd {
/**
* struct iwl_rx_queue - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @dma_addr: bus address of buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@@ -362,7 +362,7 @@ struct iwl_host_cmd {
*/
struct iwl_rx_queue {
__le32 *bd;
- dma_addr_t dma_addr;
+ dma_addr_t bd_dma;
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
@@ -1036,11 +1036,12 @@ struct iwl_event_log {
* This is the threshold value of plcp error rate per 100mSecs. It is
* used to set and check for the validity of plcp_delta.
*/
-#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
@@ -1224,13 +1225,6 @@ struct iwl_priv {
struct iwl_power_mgr power_data;
struct iwl_tt_mgmt thermal_throttle;
- struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct iwl_notif_statistics accum_statistics;
- struct iwl_notif_statistics delta_statistics;
- struct iwl_notif_statistics max_delta;
-#endif
-
/* context information */
u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
@@ -1323,6 +1317,13 @@ struct iwl_priv {
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+ struct iwl_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_notif_statistics accum_statistics;
+ struct iwl_notif_statistics delta_statistics;
+ struct iwl_notif_statistics max_delta;
+#endif
} _agn;
#endif
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index ee11452..a45d02e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -629,6 +629,9 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
calib_ver < priv->cfg->eeprom_calib_ver)
goto err;
+ IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ eeprom_ver, calib_ver);
+
return 0;
err:
IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 5e32057..b437f31 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -175,7 +175,7 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
@@ -199,32 +199,12 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
err_rb:
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ rxq->bd_dma);
err_bd:
return -ENOMEM;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacon_notif *missed_beacon;
-
- missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
- priv->missed_beacon_threshold) {
- IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consecutive_missed_beacons),
- le32_to_cpu(missed_beacon->total_missed_becons),
- le32_to_cpu(missed_beacon->num_recvd_beacons),
- le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl_init_sensitivity(priv);
- }
-}
-EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -243,161 +223,6 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
-
-
-/* Calculate noise level, based on measurements during network silence just
- * before arriving beacon. This measurement can be done only if we know
- * exactly when to expect beacons, therefore only when we're associated. */
-static void iwl_rx_calc_noise(struct iwl_priv *priv)
-{
- struct statistics_rx_non_phy *rx_info
- = &(priv->statistics.rx.general);
- int num_active_rx = 0;
- int total_silence = 0;
- int bcn_silence_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
- int bcn_silence_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
- int bcn_silence_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
- int last_rx_noise;
-
- if (bcn_silence_a) {
- total_silence += bcn_silence_a;
- num_active_rx++;
- }
- if (bcn_silence_b) {
- total_silence += bcn_silence_b;
- num_active_rx++;
- }
- if (bcn_silence_c) {
- total_silence += bcn_silence_c;
- num_active_rx++;
- }
-
- /* Average among active antennas */
- if (num_active_rx)
- last_rx_noise = (total_silence / num_active_rx) - 107;
- else
- last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
- bcn_silence_a, bcn_silence_b, bcn_silence_c,
- last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/*
- * based on the assumption of all statistics counter are in DWORD
- * FIXME: This function is for debugging, do not deal with
- * the case of counters roll-over.
- */
-static void iwl_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
-
- prev_stats = (__le32 *)&priv->statistics;
- accum_stats = (u32 *)&priv->accum_statistics;
- delta = (u32 *)&priv->delta_statistics;
- max_delta = (u32 *)&priv->max_delta;
-
- for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- priv->accum_statistics.general.temperature =
- priv->statistics.general.temperature;
- priv->accum_statistics.general.temperature_m =
- priv->statistics.general.temperature_m;
- priv->accum_statistics.general.ttl_timestamp =
- priv->statistics.general.ttl_timestamp;
- priv->accum_statistics.tx.tx_power.ant_a =
- priv->statistics.tx.tx_power.ant_a;
- priv->accum_statistics.tx.tx_power.ant_b =
- priv->statistics.tx.tx_power.ant_b;
- priv->accum_statistics.tx.tx_power.ant_c =
- priv->statistics.tx.tx_power.ant_c;
-}
-#endif
-
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-bool iwl_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- bool rc = true;
- int combined_plcp_delta;
- unsigned int plcp_msec;
- unsigned long plcp_received_jiffies;
-
- /*
- * check for plcp_err and trigger radio reset if it exceeds
- * the plcp error threshold plcp_delta.
- */
- plcp_received_jiffies = jiffies;
- plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
- (long) priv->plcp_jiffies);
- priv->plcp_jiffies = plcp_received_jiffies;
- /*
- * check to make sure plcp_msec is not 0 to prevent division
- * by zero.
- */
- if (plcp_msec) {
- combined_plcp_delta =
- (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
- le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
- (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
- le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
-
- if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
- priv->cfg->plcp_delta_threshold) {
- /*
- * if plcp_err exceed the threshold,
- * the following data is printed in csv format:
- * Text: plcp_err exceeded %d,
- * Received ofdm.plcp_err,
- * Current ofdm.plcp_err,
- * Received ofdm_ht.plcp_err,
- * Current ofdm_ht.plcp_err,
- * combined_plcp_delta,
- * plcp_msec
- */
- IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
- "%u, %u, %u, %u, %d, %u mSecs\n",
- priv->cfg->plcp_delta_threshold,
- le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
- le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
- le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
- le32_to_cpu(
- priv->statistics.rx.ofdm_ht.plcp_err),
- combined_plcp_delta, plcp_msec);
- rc = false;
- }
- }
- return rc;
-}
-EXPORT_SYMBOL(iwl_good_plcp_health);
-
void iwl_recover_from_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt)
{
@@ -431,69 +256,6 @@ void iwl_recover_from_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_recover_from_statistics);
-void iwl_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-
- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
- (int)sizeof(priv->statistics),
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->statistics.general.temperature !=
- pkt->u.stats.general.temperature) ||
- ((priv->statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
- iwl_recover_from_statistics(priv, pkt);
-
- memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
-
- set_bit(STATUS_STATISTICS, &priv->status);
-
- /* Reschedule the statistics timer to occur in
- * REG_RECALIB_PERIOD seconds to ensure we get a
- * thermal update even if the uCode doesn't give
- * us one */
- mod_timer(&priv->statistics_periodic, jiffies +
- msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl_rx_calc_noise(priv);
- queue_work(priv->workqueue, &priv->run_time_calib_work);
- }
- if (priv->cfg->ops->lib->temp_ops.temperature && change)
- priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-EXPORT_SYMBOL(iwl_rx_statistics);
-
-void iwl_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- memset(&priv->accum_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->delta_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->max_delta, 0,
- sizeof(struct iwl_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl_rx_statistics(priv, rxb);
-}
-EXPORT_SYMBOL(iwl_reply_statistics);
-
/*
* returns non-zero if packet should be dropped
*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index b8bcd48..2a7c399 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -500,6 +500,7 @@ void iwl_bg_abort_scan(struct work_struct *work)
mutex_lock(&priv->mutex);
+ cancel_delayed_work_sync(&priv->scan_check);
set_bit(STATUS_SCAN_ABORTING, &priv->status);
iwl_send_scan_abort(priv);
@@ -536,6 +537,15 @@ void iwl_bg_scan_completed(struct work_struct *work)
/* Since setting the TXPOWER may have been deferred while
* performing the scan, fire one off */
iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&priv->active_rxon,
+ &priv->staging_rxon, sizeof(priv->staging_rxon)))
+ iwlcore_commit_rxon(priv);
+
out:
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c712713..9511f03 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -30,6 +30,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
+#include <linux/lockdep.h>
#include "iwl-dev.h"
#include "iwl-core.h"
@@ -54,18 +55,19 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
}
}
-static void iwl_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt,
- bool sync)
+static int iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt,
+ bool sync)
{
u8 sta_id = addsta->sta.sta_id;
unsigned long flags;
+ int ret = -EIO;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
pkt->hdr.flags);
- return;
+ return ret;
}
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
@@ -77,6 +79,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
case ADD_STA_SUCCESS_MSK:
IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
iwl_sta_ucode_activate(priv, sta_id);
+ ret = 0;
break;
case ADD_STA_NO_ROOM_IN_TABLE:
IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
@@ -114,6 +117,8 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return ret;
}
static void iwl_add_sta_callback(struct iwl_priv *priv,
@@ -145,8 +150,10 @@ int iwl_send_add_sta(struct iwl_priv *priv,
if (flags & CMD_ASYNC)
cmd.callback = iwl_add_sta_callback;
- else
+ else {
cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
ret = iwl_send_cmd(priv, &cmd);
@@ -156,7 +163,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
if (ret == 0) {
pkt = (struct iwl_rx_packet *)cmd.reply_page;
- iwl_process_add_sta_resp(priv, sta, pkt, true);
+ ret = iwl_process_add_sta_resp(priv, sta, pkt, true);
}
iwl_free_pages(priv, cmd.reply_page);
@@ -831,7 +838,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
{
unsigned long flags;
__le16 key_flags = 0;
- int ret;
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -871,11 +880,10 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
@@ -884,7 +892,9 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
{
unsigned long flags;
__le16 key_flags = 0;
- int ret;
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -919,11 +929,10 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -1013,9 +1022,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
u8 sta_id)
{
unsigned long flags;
- int ret = 0;
u16 key_flags;
u8 keyidx;
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
priv->key_mapping_key--;
@@ -1062,9 +1073,10 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
- ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
+
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
EXPORT_SYMBOL(iwl_remove_dynamic_key);
@@ -1073,6 +1085,8 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
{
int ret;
+ lockdep_assert_held(&priv->mutex);
+
priv->key_mapping_key++;
keyconf->hw_key_idx = HW_KEY_DYNAMIC;
@@ -1245,6 +1259,36 @@ int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq)
}
EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
+/**
+ * iwl_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwlagn. Placed here to have all bcast station management
+ * code together.
+ */
+int iwl_update_bcast_station(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_link_quality_cmd *link_cmd;
+ u8 sta_id = priv->hw_params.bcast_sta_id;
+
+ link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ if (priv->stations[sta_id].lq)
+ kfree(priv->stations[sta_id].lq);
+ else
+ IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iwl_update_bcast_station);
+
void iwl_dealloc_bcast_station(struct iwl_priv *priv)
{
unsigned long flags;
@@ -1268,17 +1312,22 @@ EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station);
/**
* iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
*/
-void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
+int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
{
unsigned long flags;
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
/* Remove "disable" flag, to enable Tx for this TID */
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
@@ -1287,6 +1336,9 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
{
unsigned long flags;
int sta_id;
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
@@ -1298,10 +1350,10 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
- CMD_ASYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
EXPORT_SYMBOL(iwl_sta_rx_agg_start);
@@ -1309,7 +1361,10 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid)
{
unsigned long flags;
- int sta_id, ret;
+ int sta_id;
+ struct iwl_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -1322,11 +1377,10 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
EXPORT_SYMBOL(iwl_sta_rx_agg_stop);
@@ -1373,10 +1427,14 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
sta->addr);
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ sta->addr);
ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
if (ret)
IWL_ERR(priv, "Error removing station %pM\n",
sta->addr);
+ mutex_unlock(&priv->mutex);
return ret;
}
EXPORT_SYMBOL(iwl_mac_sta_remove);
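Several hunks above follow the same pattern: the ADD_STA command is copied to a stack buffer while priv->sta_lock is held, the spinlock is dropped, and only then is the command sent with CMD_SYNC, which may sleep. A minimal sketch of that pattern, assuming the usual kernel locking primitives (illustrative only, not part of the patch):

	static int send_sta_cmd_sketch(struct iwl_priv *priv, u8 sta_id)
	{
		struct iwl_addsta_cmd sta_cmd;
		unsigned long flags;

		spin_lock_irqsave(&priv->sta_lock, flags);
		/* snapshot the command while the station table is stable */
		memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
		spin_unlock_irqrestore(&priv->sta_lock, flags);

		/* CMD_SYNC can sleep, so it must run outside the spinlock */
		return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
	}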
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 5b1b1e4..ba95b1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -60,6 +60,7 @@ void iwl_restore_stations(struct iwl_priv *priv);
void iwl_clear_ucode_stations(struct iwl_priv *priv);
int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
void iwl_dealloc_bcast_station(struct iwl_priv *priv);
+int iwl_update_bcast_station(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags);
@@ -73,7 +74,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr);
int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
+int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid, u16 ssn);
int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 0f16c7d..8eb3471 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1171,7 +1171,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
}
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ rxq->bd_dma);
dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
@@ -1252,6 +1252,8 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
while (i != r) {
+ int len;
+
rxb = rxq->queue[i];
/* If an RXB doesn't have a Rx queue slot associated with it,
@@ -1266,8 +1268,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
PCI_DMA_FROMDEVICE);
pkt = rxb_addr(rxb);
- trace_iwlwifi_dev_rx(priv, pkt,
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(priv, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -1421,7 +1424,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
IWL_ERR(priv,
- "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+ "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
desc_lookup(desc), desc, time, blink1, blink2,
ilink1, ilink2, data1);
trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
@@ -3360,10 +3363,13 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
bool is_ap = vif->type == NL80211_IFTYPE_STATION;
u8 sta_id;
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
sta->addr);
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+ sta->addr);
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
&sta_id);
@@ -3371,6 +3377,7 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
sta->addr, ret);
/* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&priv->mutex);
return ret;
}
@@ -3380,6 +3387,7 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
sta->addr);
iwl3945_rs_rate_init(priv, sta, sta_id);
+ mutex_unlock(&priv->mutex);
return 0;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 902e95f..6061967 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -670,20 +670,24 @@ static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
}
static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
- enum tx_power_setting type, int dbm)
+ enum nl80211_tx_power_setting type, int mbm)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
int ret;
switch (type) {
- case TX_POWER_AUTOMATIC:
+ case NL80211_TX_POWER_AUTOMATIC:
return 0;
- case TX_POWER_FIXED:
+ case NL80211_TX_POWER_FIXED:
+ if (mbm < 0 || (mbm % 100))
+ return -EOPNOTSUPP;
+
if (!test_bit(IWM_STATUS_READY, &iwm->status))
return 0;
ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_TX_PWR_LIMIT_USR, dbm * 2);
+ CFG_TX_PWR_LIMIT_USR,
+ MBM_TO_DBM(mbm) * 2);
if (ret < 0)
return ret;
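The set_txpower change above tracks the cfg80211 switch from dBm to mBm (1 mBm = 1/100 dBm): negative or fractional-dBm requests are rejected, and MBM_TO_DBM() from <net/cfg80211.h> converts back to whole dBm before the existing *2 scaling is applied. A small worked example of the unit handling (the helper name is illustrative, not part of the patch):

	static int mbm_to_fw_limit(int mbm)
	{
		if (mbm < 0 || (mbm % 100))	/* not a whole number of dBm */
			return -EOPNOTSUPP;
		/* e.g. 1500 mBm -> 15 dBm -> 30 after the unchanged *2 scaling */
		return MBM_TO_DBM(mbm) * 2;
	}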
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index 45e870e..f7d01bf 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,4 +1,3 @@
-libertas-y += assoc.o
libertas-y += cfg.o
libertas-y += cmd.o
libertas-y += cmdresp.o
@@ -6,9 +5,7 @@ libertas-y += debugfs.o
libertas-y += ethtool.o
libertas-y += main.o
libertas-y += rx.o
-libertas-y += scan.o
libertas-y += tx.o
-libertas-y += wext.o
libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
usb8xxx-objs += if_usb.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
deleted file mode 100644
index aa06070e..0000000
--- a/drivers/net/wireless/libertas/assoc.c
+++ /dev/null
@@ -1,2264 +0,0 @@
-/* Copyright (C) 2006, Red Hat, Inc. */
-
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/ieee80211.h>
-#include <linux/if_arp.h>
-#include <linux/slab.h>
-#include <net/lib80211.h>
-
-#include "assoc.h"
-#include "decl.h"
-#include "host.h"
-#include "scan.h"
-#include "cmd.h"
-
-static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) =
- { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-
-/* The firmware needs the following bits masked out of the beacon-derived
- * capability field when associating/joining to a BSS:
- * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
- */
-#define CAPINFO_MASK (~(0xda00))
-
-/**
- * 802.11b/g supported bitrates (in 500Kb/s units)
- */
-u8 lbs_bg_rates[MAX_RATES] =
- { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
-0x00, 0x00 };
-
-
-static int assoc_helper_wep_keys(struct lbs_private *priv,
- struct assoc_request *assoc_req);
-
-/**
- * @brief This function finds common rates between rates and card rates.
- *
- * It will fill common rates in rates as output if found.
- *
- * NOTE: Setting the MSB of the basic rates need to be taken
- * care, either before or after calling this function
- *
- * @param priv A pointer to struct lbs_private structure
- * @param rates the buffer which keeps input and output
- * @param rates_size the size of rates buffer; new size of buffer on return,
- * which will be less than or equal to original rates_size
- *
- * @return 0 on success, or -1 on error
- */
-static int get_common_rates(struct lbs_private *priv,
- u8 *rates,
- u16 *rates_size)
-{
- int i, j;
- u8 intersection[MAX_RATES];
- u16 intersection_size;
- u16 num_rates = 0;
-
- intersection_size = min_t(u16, *rates_size, ARRAY_SIZE(intersection));
-
- /* Allow each rate from 'rates' that is supported by the hardware */
- for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && lbs_bg_rates[i]; i++) {
- for (j = 0; j < intersection_size && rates[j]; j++) {
- if (rates[j] == lbs_bg_rates[i])
- intersection[num_rates++] = rates[j];
- }
- }
-
- lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
- lbs_deb_hex(LBS_DEB_JOIN, "card rates ", lbs_bg_rates,
- ARRAY_SIZE(lbs_bg_rates));
- lbs_deb_hex(LBS_DEB_JOIN, "common rates", intersection, num_rates);
- lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
-
- if (!priv->enablehwauto) {
- for (i = 0; i < num_rates; i++) {
- if (intersection[i] == priv->cur_rate)
- goto done;
- }
- lbs_pr_alert("Previously set fixed data rate %#x isn't "
- "compatible with the network.\n", priv->cur_rate);
- return -1;
- }
-
-done:
- memset(rates, 0, *rates_size);
- *rates_size = num_rates;
- memcpy(rates, intersection, num_rates);
- return 0;
-}
-
-
-/**
- * @brief Sets the MSB on basic rates as the firmware requires
- *
- * Scan through an array and set the MSB for basic data rates.
- *
- * @param rates buffer of data rates
- * @param len size of buffer
- */
-static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (rates[i] == 0x02 || rates[i] == 0x04 ||
- rates[i] == 0x0b || rates[i] == 0x16)
- rates[i] |= 0x80;
- }
-}
-
-
-static u8 iw_auth_to_ieee_auth(u8 auth)
-{
- if (auth == IW_AUTH_ALG_OPEN_SYSTEM)
- return 0x00;
- else if (auth == IW_AUTH_ALG_SHARED_KEY)
- return 0x01;
- else if (auth == IW_AUTH_ALG_LEAP)
- return 0x80;
-
- lbs_deb_join("%s: invalid auth alg 0x%X\n", __func__, auth);
- return 0;
-}
-
-/**
- * @brief This function prepares the authenticate command. AUTHENTICATE only
- * sets the authentication suite for future associations, as the firmware
- * handles authentication internally during the ASSOCIATE command.
- *
- * @param priv A pointer to struct lbs_private structure
- * @param bssid The peer BSSID with which to authenticate
- * @param auth The authentication mode to use (from wireless.h)
- *
- * @return 0 or -1
- */
-static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth)
-{
- struct cmd_ds_802_11_authenticate cmd;
- int ret = -1;
-
- lbs_deb_enter(LBS_DEB_JOIN);
-
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- memcpy(cmd.bssid, bssid, ETH_ALEN);
-
- cmd.authtype = iw_auth_to_ieee_auth(auth);
-
- lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n", bssid, cmd.authtype);
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
-
- lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
- return ret;
-}
-
-
-int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
- struct assoc_request *assoc)
-{
- struct cmd_ds_802_11_set_wep cmd;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
- cmd.action = cpu_to_le16(cmd_action);
-
- if (cmd_action == CMD_ACT_ADD) {
- int i;
-
- /* default tx key index */
- cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
- CMD_WEP_KEY_INDEX_MASK);
-
- /* Copy key types and material to host command structure */
- for (i = 0; i < 4; i++) {
- struct enc_key *pkey = &assoc->wep_keys[i];
-
- switch (pkey->len) {
- case KEY_LEN_WEP_40:
- cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
- memmove(cmd.keymaterial[i], pkey->key, pkey->len);
- lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
- break;
- case KEY_LEN_WEP_104:
- cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
- memmove(cmd.keymaterial[i], pkey->key, pkey->len);
- lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
- break;
- case 0:
- break;
- default:
- lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
- i, pkey->len);
- ret = -1;
- goto done;
- break;
- }
- }
- } else if (cmd_action == CMD_ACT_REMOVE) {
- /* ACT_REMOVE clears _all_ WEP keys */
-
- /* default tx key index */
- cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
- CMD_WEP_KEY_INDEX_MASK);
- lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
- }
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
-done:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
- uint16_t *enable)
-{
- struct cmd_ds_802_11_enable_rsn cmd;
- int ret;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(cmd_action);
-
- if (cmd_action == CMD_ACT_GET)
- cmd.enable = 0;
- else {
- if (*enable)
- cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
- else
- cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
- lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
- }
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
- if (!ret && cmd_action == CMD_ACT_GET)
- *enable = le16_to_cpu(cmd.enable);
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
- struct enc_key *key)
-{
- lbs_deb_enter(LBS_DEB_CMD);
-
- if (key->flags & KEY_INFO_WPA_ENABLED)
- keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
- if (key->flags & KEY_INFO_WPA_UNICAST)
- keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
- if (key->flags & KEY_INFO_WPA_MCAST)
- keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
-
- keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
- keyparam->keytypeid = cpu_to_le16(key->type);
- keyparam->keylen = cpu_to_le16(key->len);
- memcpy(keyparam->key, key->key, key->len);
-
- /* Length field doesn't include the {type,length} header */
- keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
- lbs_deb_leave(LBS_DEB_CMD);
-}
-
-int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
- struct assoc_request *assoc)
-{
- struct cmd_ds_802_11_key_material cmd;
- int ret = 0;
- int index = 0;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- cmd.action = cpu_to_le16(cmd_action);
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
- if (cmd_action == CMD_ACT_GET) {
- cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2);
- } else {
- memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
-
- if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
- set_one_wpa_key(&cmd.keyParamSet[index],
- &assoc->wpa_unicast_key);
- index++;
- }
-
- if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
- set_one_wpa_key(&cmd.keyParamSet[index],
- &assoc->wpa_mcast_key);
- index++;
- }
-
- /* The common header and as many keys as we included */
- cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
- keyParamSet[index]));
- }
- ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
- /* Copy the returned key to driver private data */
- if (!ret && cmd_action == CMD_ACT_GET) {
- void *buf_ptr = cmd.keyParamSet;
- void *resp_end = &(&cmd)[1];
-
- while (buf_ptr < resp_end) {
- struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
- struct enc_key *key;
- uint16_t param_set_len = le16_to_cpu(keyparam->length);
- uint16_t key_len = le16_to_cpu(keyparam->keylen);
- uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
- uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
- void *end;
-
- end = (void *)keyparam + sizeof(keyparam->type)
- + sizeof(keyparam->length) + param_set_len;
-
- /* Make sure we don't access past the end of the IEs */
- if (end > resp_end)
- break;
-
- if (key_flags & KEY_INFO_WPA_UNICAST)
- key = &priv->wpa_unicast_key;
- else if (key_flags & KEY_INFO_WPA_MCAST)
- key = &priv->wpa_mcast_key;
- else
- break;
-
- /* Copy returned key into driver */
- memset(key, 0, sizeof(struct enc_key));
- if (key_len > sizeof(key->key))
- break;
- key->type = key_type;
- key->flags = key_flags;
- key->len = key_len;
- memcpy(key->key, keyparam->key, key->len);
-
- buf_ptr = end + 1;
- }
- }
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
-{
-/* Bit Rate
-* 15:13 Reserved
-* 12 54 Mbps
-* 11 48 Mbps
-* 10 36 Mbps
-* 9 24 Mbps
-* 8 18 Mbps
-* 7 12 Mbps
-* 6 9 Mbps
-* 5 6 Mbps
-* 4 Reserved
-* 3 11 Mbps
-* 2 5.5 Mbps
-* 1 2 Mbps
-* 0 1 Mbps
-**/
-
- uint16_t ratemask;
- int i = lbs_data_rate_to_fw_index(rate);
- if (lower_rates_ok)
- ratemask = (0x1fef >> (12 - i));
- else
- ratemask = (1 << i);
- return cpu_to_le16(ratemask);
-}
-
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
- uint16_t cmd_action)
-{
- struct cmd_ds_802_11_rate_adapt_rateset cmd;
- int ret;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- if (!priv->cur_rate && !priv->enablehwauto)
- return -EINVAL;
-
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
- cmd.action = cpu_to_le16(cmd_action);
- cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
- cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
- ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
- if (!ret && cmd_action == CMD_ACT_GET)
- priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief Set the data rate
- *
- * @param priv A pointer to struct lbs_private structure
- * @param rate The desired data rate, or 0 to clear a locked rate
- *
- * @return 0 on success, error on failure
- */
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
-{
- struct cmd_ds_802_11_data_rate cmd;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
- if (rate > 0) {
- cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
- cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
- if (cmd.rates[0] == 0) {
- lbs_deb_cmd("DATA_RATE: invalid requested rate of"
- " 0x%02X\n", rate);
- ret = 0;
- goto out;
- }
- lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
- } else {
- cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
- lbs_deb_cmd("DATA_RATE: setting auto\n");
- }
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
- if (ret)
- goto out;
-
- lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd));
-
- /* FIXME: get actual rates FW can do if this command actually returns
- * all data rates supported.
- */
- priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
- lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
-
-out:
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-
-int lbs_cmd_802_11_rssi(struct lbs_private *priv,
- struct cmd_ds_command *cmd)
-{
-
- lbs_deb_enter(LBS_DEB_CMD);
- cmd->command = cpu_to_le16(CMD_802_11_RSSI);
- cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) +
- sizeof(struct cmd_header));
- cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
-
- /* reset Beacon SNR/NF/RSSI values */
- priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
- priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
- priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
- priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
- priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
- priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
-
- lbs_deb_leave(LBS_DEB_CMD);
- return 0;
-}
-
-int lbs_ret_802_11_rssi(struct lbs_private *priv,
- struct cmd_ds_command *resp)
-{
- struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- /* store the non average value */
- priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
- priv->NF[TYPE_BEACON][TYPE_NOAVG] =
- get_unaligned_le16(&rssirsp->noisefloor);
-
- priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
- priv->NF[TYPE_BEACON][TYPE_AVG] =
- get_unaligned_le16(&rssirsp->avgnoisefloor);
-
- priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
- CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
- priv->NF[TYPE_BEACON][TYPE_NOAVG]);
-
- priv->RSSI[TYPE_BEACON][TYPE_AVG] =
- CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
- priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
-
- lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
- priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
- priv->RSSI[TYPE_BEACON][TYPE_AVG]);
-
- lbs_deb_leave(LBS_DEB_CMD);
- return 0;
-}
-
-
-int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
- struct cmd_ds_command *cmd,
- u16 cmd_action)
-{
- struct cmd_ds_802_11_beacon_control
- *bcn_ctrl = &cmd->params.bcn_ctrl;
-
- lbs_deb_enter(LBS_DEB_CMD);
- cmd->size =
- cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
- + sizeof(struct cmd_header));
- cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
-
- bcn_ctrl->action = cpu_to_le16(cmd_action);
- bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
- bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
-
- lbs_deb_leave(LBS_DEB_CMD);
- return 0;
-}
-
-int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
- struct cmd_ds_command *resp)
-{
- struct cmd_ds_802_11_beacon_control *bcn_ctrl =
- &resp->params.bcn_ctrl;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- if (bcn_ctrl->action == CMD_ACT_GET) {
- priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
- priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
- }
-
- lbs_deb_enter(LBS_DEB_CMD);
- return 0;
-}
-
-
-
-static int lbs_assoc_post(struct lbs_private *priv,
- struct cmd_ds_802_11_associate_response *resp)
-{
- int ret = 0;
- union iwreq_data wrqu;
- struct bss_descriptor *bss;
- u16 status_code;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- if (!priv->in_progress_assoc_req) {
- lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
- ret = -1;
- goto done;
- }
- bss = &priv->in_progress_assoc_req->bss;
-
- /*
- * Older FW versions map the IEEE 802.11 Status Code in the association
- * response to the following values returned in resp->statuscode:
- *
- * IEEE Status Code Marvell Status Code
- * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
- * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
- * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
- * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
- * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
- * others -> 0x0003 ASSOC_RESULT_REFUSED
- *
- * Other response codes:
- * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
- * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
- * association response from the AP)
- */
-
- status_code = le16_to_cpu(resp->statuscode);
- if (priv->fwrelease < 0x09000000) {
- switch (status_code) {
- case 0x00:
- break;
- case 0x01:
- lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
- break;
- case 0x02:
- lbs_deb_assoc("ASSOC_RESP: internal timer "
- "expired while waiting for the AP\n");
- break;
- case 0x03:
- lbs_deb_assoc("ASSOC_RESP: association "
- "refused by AP\n");
- break;
- case 0x04:
- lbs_deb_assoc("ASSOC_RESP: authentication "
- "refused by AP\n");
- break;
- default:
- lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
- " unknown\n", status_code);
- break;
- }
- } else {
- /* v9+ returns the AP's association response */
- lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x\n", status_code);
- }
-
- if (status_code) {
- lbs_mac_event_disconnected(priv);
- ret = status_code;
- goto done;
- }
-
- lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP",
- (void *) ((u8 *) resp + sizeof(resp->hdr)),
- le16_to_cpu(resp->hdr.size) - sizeof (resp->hdr));
-
- /* Send a Media Connected event, according to the Spec */
- priv->connect_status = LBS_CONNECTED;
-
- /* Update current SSID and BSSID */
- memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
- priv->curbssparams.ssid_len = bss->ssid_len;
- memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
-
- priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
- priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
-
- memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
- memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
- priv->nextSNRNF = 0;
- priv->numSNRNF = 0;
-
- netif_carrier_on(priv->dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->dev);
-
- memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-
-done:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief This function prepares and sends an association-class command.
- *
- * @param priv A pointer to struct lbs_private structure
- * @param assoc_req The association request describing the BSS to associate
- * or reassociate with
- * @param command The actual command, either CMD_802_11_ASSOCIATE or
- * CMD_802_11_REASSOCIATE
- *
- * @return 0 on success, error on failure
- */
-static int lbs_associate(struct lbs_private *priv,
- struct assoc_request *assoc_req,
- u16 command)
-{
- struct cmd_ds_802_11_associate cmd;
- int ret = 0;
- struct bss_descriptor *bss = &assoc_req->bss;
- u8 *pos = &(cmd.iebuf[0]);
- u16 tmpcap, tmplen, tmpauth;
- struct mrvl_ie_ssid_param_set *ssid;
- struct mrvl_ie_ds_param_set *ds;
- struct mrvl_ie_cf_param_set *cf;
- struct mrvl_ie_rates_param_set *rates;
- struct mrvl_ie_rsn_param_set *rsn;
- struct mrvl_ie_auth_type *auth;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- BUG_ON((command != CMD_802_11_ASSOCIATE) &&
- (command != CMD_802_11_REASSOCIATE));
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.command = cpu_to_le16(command);
-
- /* Fill in static fields */
- memcpy(cmd.bssid, bss->bssid, ETH_ALEN);
- cmd.listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
-
- /* Capability info */
- tmpcap = (bss->capability & CAPINFO_MASK);
- if (bss->mode == IW_MODE_INFRA)
- tmpcap |= WLAN_CAPABILITY_ESS;
- cmd.capability = cpu_to_le16(tmpcap);
- lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
-
- /* SSID */
- ssid = (struct mrvl_ie_ssid_param_set *) pos;
- ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
- tmplen = bss->ssid_len;
- ssid->header.len = cpu_to_le16(tmplen);
- memcpy(ssid->ssid, bss->ssid, tmplen);
- pos += sizeof(ssid->header) + tmplen;
-
- ds = (struct mrvl_ie_ds_param_set *) pos;
- ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
- ds->header.len = cpu_to_le16(1);
- ds->channel = bss->phy.ds.channel;
- pos += sizeof(ds->header) + 1;
-
- cf = (struct mrvl_ie_cf_param_set *) pos;
- cf->header.type = cpu_to_le16(TLV_TYPE_CF);
- tmplen = sizeof(*cf) - sizeof (cf->header);
- cf->header.len = cpu_to_le16(tmplen);
- /* IE payload should be zeroed, firmware fills it in for us */
- pos += sizeof(*cf);
-
- rates = (struct mrvl_ie_rates_param_set *) pos;
- rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
- tmplen = min_t(u16, ARRAY_SIZE(bss->rates), MAX_RATES);
- memcpy(&rates->rates, &bss->rates, tmplen);
- if (get_common_rates(priv, rates->rates, &tmplen)) {
- ret = -1;
- goto done;
- }
- pos += sizeof(rates->header) + tmplen;
- rates->header.len = cpu_to_le16(tmplen);
- lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
-
- /* Copy the infra. association rates into Current BSS state structure */
- memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
- memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
-
- /* Set MSB on basic rates as the firmware requires, but _after_
- * copying to current bss rates.
- */
- lbs_set_basic_rate_flags(rates->rates, tmplen);
-
- /* Firmware v9+ indicate authentication suites as a TLV */
- if (priv->fwrelease >= 0x09000000) {
- auth = (struct mrvl_ie_auth_type *) pos;
- auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
- auth->header.len = cpu_to_le16(2);
- tmpauth = iw_auth_to_ieee_auth(priv->secinfo.auth_mode);
- auth->auth = cpu_to_le16(tmpauth);
- pos += sizeof(auth->header) + 2;
-
- lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n",
- bss->bssid, priv->secinfo.auth_mode);
- }
-
- /* WPA/WPA2 IEs */
- if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
- rsn = (struct mrvl_ie_rsn_param_set *) pos;
- /* WPA_IE or WPA2_IE */
- rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
- tmplen = (u16) assoc_req->wpa_ie[1];
- rsn->header.len = cpu_to_le16(tmplen);
- memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
- lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: WPA/RSN IE", (u8 *) rsn,
- sizeof(rsn->header) + tmplen);
- pos += sizeof(rsn->header) + tmplen;
- }
-
- cmd.hdr.size = cpu_to_le16((sizeof(cmd) - sizeof(cmd.iebuf)) +
- (u16)(pos - (u8 *) &cmd.iebuf));
-
- /* update curbssparams */
- priv->channel = bss->phy.ds.channel;
-
- ret = lbs_cmd_with_response(priv, command, &cmd);
- if (ret == 0) {
- ret = lbs_assoc_post(priv,
- (struct cmd_ds_802_11_associate_response *) &cmd);
- }
-
-done:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
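Editorial note: the lbs_set_basic_rate_flags() helper called above (and again in the ad-hoc paths below) is defined elsewhere in the driver and is not part of this hunk. Assuming it follows the same convention as the lbs_add_rates() helper added to cfg.c later in this patch, it presumably just sets the high bit on the 802.11b basic rates, roughly:

/* Sketch only: mark the 1/2/5.5/11 Mbit rates as basic by setting bit 7,
 * matching the 0x80 convention the firmware expects. The real helper lives
 * elsewhere in the libertas driver; this is an assumed equivalent.
 */
static void sketch_set_basic_rate_flags(u8 *rates, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (rates[i] == 0x02 || rates[i] == 0x04 ||
		    rates[i] == 0x0b || rates[i] == 0x16)
			rates[i] |= 0x80;
}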
-
-/**
- * @brief Associate to a specific BSS discovered in a scan
- *
- * @param priv A pointer to struct lbs_private structure
- * @param assoc_req The association request describing the BSS to associate with
- *
- * @return 0-success, otherwise fail
- */
-static int lbs_try_associate(struct lbs_private *priv,
- struct assoc_request *assoc_req)
-{
- int ret;
- u8 preamble = RADIO_PREAMBLE_LONG;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- /* FW v9 and higher indicate authentication suites as a TLV in the
- * association command, not as a separate authentication command.
- */
- if (priv->fwrelease < 0x09000000) {
- ret = lbs_set_authentication(priv, assoc_req->bss.bssid,
- priv->secinfo.auth_mode);
- if (ret)
- goto out;
- }
-
- /* Use short preamble only when both the BSS and firmware support it */
- if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
- preamble = RADIO_PREAMBLE_SHORT;
-
- ret = lbs_set_radio(priv, preamble, 1);
- if (ret)
- goto out;
-
- ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
- /* If the association fails with current auth mode, let's
- * try by changing the auth mode
- */
- if ((priv->authtype_auto) &&
- (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) &&
- (assoc_req->secinfo.wep_enabled) &&
- (priv->connect_status != LBS_CONNECTED)) {
- if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM)
- priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
- else
- priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
- if (!assoc_helper_wep_keys(priv, assoc_req))
- ret = lbs_associate(priv, assoc_req,
- CMD_802_11_ASSOCIATE);
- }
-
- if (ret)
- ret = -1;
-out:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-static int lbs_adhoc_post(struct lbs_private *priv,
- struct cmd_ds_802_11_ad_hoc_result *resp)
-{
- int ret = 0;
- u16 command = le16_to_cpu(resp->hdr.command);
- u16 result = le16_to_cpu(resp->hdr.result);
- union iwreq_data wrqu;
- struct bss_descriptor *bss;
- DECLARE_SSID_BUF(ssid);
-
- lbs_deb_enter(LBS_DEB_JOIN);
-
- if (!priv->in_progress_assoc_req) {
- lbs_deb_join("ADHOC_RESP: no in-progress association "
- "request\n");
- ret = -1;
- goto done;
- }
- bss = &priv->in_progress_assoc_req->bss;
-
- /*
- * Join result code 0 --> SUCCESS
- */
- if (result) {
- lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
- if (priv->connect_status == LBS_CONNECTED)
- lbs_mac_event_disconnected(priv);
- ret = -1;
- goto done;
- }
-
- /* Send a Media Connected event, according to the Spec */
- priv->connect_status = LBS_CONNECTED;
-
- if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
- /* Update the created network descriptor with the new BSSID */
- memcpy(bss->bssid, resp->bssid, ETH_ALEN);
- }
-
- /* Set the BSSID from the joined/started descriptor */
- memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
-
- /* Set the new SSID to current SSID */
- memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
- priv->curbssparams.ssid_len = bss->ssid_len;
-
- netif_carrier_on(priv->dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->dev);
-
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-
- lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
- print_ssid(ssid, bss->ssid, bss->ssid_len),
- priv->curbssparams.bssid,
- priv->channel);
-
-done:
- lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief Join an adhoc network found in a previous scan
- *
- * @param priv A pointer to struct lbs_private structure
- * @param assoc_req The association request describing the BSS to join
- *
- * @return 0 on success, error on failure
- */
-static int lbs_adhoc_join(struct lbs_private *priv,
- struct assoc_request *assoc_req)
-{
- struct cmd_ds_802_11_ad_hoc_join cmd;
- struct bss_descriptor *bss = &assoc_req->bss;
- u8 preamble = RADIO_PREAMBLE_LONG;
- DECLARE_SSID_BUF(ssid);
- u16 ratesize = 0;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- lbs_deb_join("current SSID '%s', ssid length %u\n",
- print_ssid(ssid, priv->curbssparams.ssid,
- priv->curbssparams.ssid_len),
- priv->curbssparams.ssid_len);
- lbs_deb_join("requested ssid '%s', ssid length %u\n",
- print_ssid(ssid, bss->ssid, bss->ssid_len),
- bss->ssid_len);
-
- /* check if the requested SSID is already joined */
- if (priv->curbssparams.ssid_len &&
- !lbs_ssid_cmp(priv->curbssparams.ssid,
- priv->curbssparams.ssid_len,
- bss->ssid, bss->ssid_len) &&
- (priv->mode == IW_MODE_ADHOC) &&
- (priv->connect_status == LBS_CONNECTED)) {
- union iwreq_data wrqu;
-
- lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as "
- "current, not attempting to re-join");
-
- /* Send the re-association event though, because the association
- * request really was successful, even if just a no-op.
- */
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid,
- ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
- goto out;
- }
-
- /* Use short preamble only when both the BSS and firmware support it */
- if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
- lbs_deb_join("AdhocJoin: Short preamble\n");
- preamble = RADIO_PREAMBLE_SHORT;
- }
-
- ret = lbs_set_radio(priv, preamble, 1);
- if (ret)
- goto out;
-
- lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel);
- lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
-
- priv->adhoccreate = 0;
- priv->channel = bss->channel;
-
- /* Build the join command */
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
- cmd.bss.type = CMD_BSS_TYPE_IBSS;
- cmd.bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
-
- memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN);
- memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len);
-
- memcpy(&cmd.bss.ds, &bss->phy.ds, sizeof(struct ieee_ie_ds_param_set));
-
- memcpy(&cmd.bss.ibss, &bss->ss.ibss,
- sizeof(struct ieee_ie_ibss_param_set));
-
- cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
- lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
- bss->capability, CAPINFO_MASK);
-
- /* information on BSSID descriptor passed to FW */
- lbs_deb_join("ADHOC_J_CMD: BSSID = %pM, SSID = '%s'\n",
- cmd.bss.bssid, cmd.bss.ssid);
-
- /* Only v8 and below support setting these */
- if (priv->fwrelease < 0x09000000) {
- /* failtimeout */
- cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
- /* probedelay */
- cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
- }
-
- /* Copy Data rates from the rates recorded in scan response */
- memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
- ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), ARRAY_SIZE (bss->rates));
- memcpy(cmd.bss.rates, bss->rates, ratesize);
- if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
- lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
- ret = -1;
- goto out;
- }
-
- /* Copy the ad-hoc creation rates into Current BSS state structure */
- memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
- memcpy(&priv->curbssparams.rates, cmd.bss.rates, ratesize);
-
- /* Set MSB on basic rates as the firmware requires, but _after_
- * copying to current bss rates.
- */
- lbs_set_basic_rate_flags(cmd.bss.rates, ratesize);
-
- cmd.bss.ibss.atimwindow = bss->atimwindow;
-
- if (assoc_req->secinfo.wep_enabled) {
- u16 tmp = le16_to_cpu(cmd.bss.capability);
- tmp |= WLAN_CAPABILITY_PRIVACY;
- cmd.bss.capability = cpu_to_le16(tmp);
- }
-
- if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
- __le32 local_ps_mode = cpu_to_le32(LBS802_11POWERMODECAM);
-
- /* wake up first */
- ret = lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE,
- CMD_ACT_SET, 0, 0,
- &local_ps_mode);
- if (ret) {
- ret = -1;
- goto out;
- }
- }
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
- if (ret == 0) {
- ret = lbs_adhoc_post(priv,
- (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief Start an Adhoc Network
- *
- * @param priv A pointer to struct lbs_private structure
- * @param assoc_req The association request describing the BSS to start
- *
- * @return 0 on success, error on failure
- */
-static int lbs_adhoc_start(struct lbs_private *priv,
- struct assoc_request *assoc_req)
-{
- struct cmd_ds_802_11_ad_hoc_start cmd;
- u8 preamble = RADIO_PREAMBLE_SHORT;
- size_t ratesize = 0;
- u16 tmpcap = 0;
- int ret = 0;
- DECLARE_SSID_BUF(ssid);
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- ret = lbs_set_radio(priv, preamble, 1);
- if (ret)
- goto out;
-
- /* Build the start command */
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
- memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len);
-
- lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n",
- print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len),
- assoc_req->ssid_len);
-
- cmd.bsstype = CMD_BSS_TYPE_IBSS;
-
- if (priv->beacon_period == 0)
- priv->beacon_period = MRVDRV_BEACON_INTERVAL;
- cmd.beaconperiod = cpu_to_le16(priv->beacon_period);
-
- WARN_ON(!assoc_req->channel);
-
- /* set Physical parameter set */
- cmd.ds.header.id = WLAN_EID_DS_PARAMS;
- cmd.ds.header.len = 1;
- cmd.ds.channel = assoc_req->channel;
-
- /* set IBSS parameter set */
- cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS;
- cmd.ibss.header.len = 2;
- cmd.ibss.atimwindow = cpu_to_le16(0);
-
- /* set capability info */
- tmpcap = WLAN_CAPABILITY_IBSS;
- if (assoc_req->secinfo.wep_enabled ||
- assoc_req->secinfo.WPAenabled ||
- assoc_req->secinfo.WPA2enabled) {
- lbs_deb_join("ADHOC_START: WEP/WPA enabled, privacy on\n");
- tmpcap |= WLAN_CAPABILITY_PRIVACY;
- } else
- lbs_deb_join("ADHOC_START: WEP disabled, privacy off\n");
-
- cmd.capability = cpu_to_le16(tmpcap);
-
- /* Only v8 and below support setting probe delay */
- if (priv->fwrelease < 0x09000000)
- cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
-
- ratesize = min(sizeof(cmd.rates), sizeof(lbs_bg_rates));
- memcpy(cmd.rates, lbs_bg_rates, ratesize);
-
- /* Copy the ad-hoc creating rates into Current BSS state structure */
- memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
- memcpy(&priv->curbssparams.rates, &cmd.rates, ratesize);
-
- /* Set MSB on basic rates as the firmware requires, but _after_
- * copying to current bss rates.
- */
- lbs_set_basic_rate_flags(cmd.rates, ratesize);
-
- lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
- cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
-
- lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
- assoc_req->channel, assoc_req->band);
-
- priv->adhoccreate = 1;
- priv->mode = IW_MODE_ADHOC;
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
- if (ret == 0)
- ret = lbs_adhoc_post(priv,
- (struct cmd_ds_802_11_ad_hoc_result *)&cmd);
-
-out:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief Stop an Ad-Hoc network and exit Ad-Hoc mode
- *
- * @param priv A pointer to struct lbs_private structure
- * @return 0 on success, or an error
- */
-int lbs_adhoc_stop(struct lbs_private *priv)
-{
- struct cmd_ds_802_11_ad_hoc_stop cmd;
- int ret;
-
- lbs_deb_enter(LBS_DEB_JOIN);
-
- memset(&cmd, 0, sizeof (cmd));
- cmd.hdr.size = cpu_to_le16 (sizeof (cmd));
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
-
- /* Clean up everything even if there was an error */
- lbs_mac_event_disconnected(priv);
-
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
- struct bss_descriptor *match_bss)
-{
- if (!secinfo->wep_enabled &&
- !secinfo->WPAenabled && !secinfo->WPA2enabled &&
- match_bss->wpa_ie[0] != WLAN_EID_GENERIC &&
- match_bss->rsn_ie[0] != WLAN_EID_RSN &&
- !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
- return 1;
- else
- return 0;
-}
-
-static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
- struct bss_descriptor *match_bss)
-{
- if (secinfo->wep_enabled &&
- !secinfo->WPAenabled && !secinfo->WPA2enabled &&
- (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
- return 1;
- else
- return 0;
-}
-
-static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
- struct bss_descriptor *match_bss)
-{
- if (!secinfo->wep_enabled && secinfo->WPAenabled &&
- (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
- /* privacy bit may NOT be set in some APs like LinkSys WRT54G
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
- )
- return 1;
- else
- return 0;
-}
-
-static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
- struct bss_descriptor *match_bss)
-{
- if (!secinfo->wep_enabled && secinfo->WPA2enabled &&
- (match_bss->rsn_ie[0] == WLAN_EID_RSN)
- /* privacy bit may NOT be set in some APs like LinkSys WRT54G
- (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
- )
- return 1;
- else
- return 0;
-}
-
-static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
- struct bss_descriptor *match_bss)
-{
- if (!secinfo->wep_enabled &&
- !secinfo->WPAenabled && !secinfo->WPA2enabled &&
- (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) &&
- (match_bss->rsn_ie[0] != WLAN_EID_RSN) &&
- (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
- return 1;
- else
- return 0;
-}
-
-/**
- * @brief Check if a scanned network is compatible with the driver settings
- *
- *    WEP     WPA    WPA2   ad-hoc  encrypt                    Network
- *  enabled enabled enabled  AES     mode   privacy  WPA  WPA2 Compatible
- *     0       0       0      0     NONE       0      0    0   yes No security
- *     1       0       0      0     NONE       1      0    0   yes Static WEP
- *     0       1       0      0      x         1x     1    x   yes WPA
- *     0       0       1      0      x         1x     x    1   yes WPA2
- *     0       0       0      1     NONE       1      0    0   yes Ad-hoc AES
- *     0       0       0      0    !=NONE      1      0    0   yes Dynamic WEP
- *
- *
- * @param priv A pointer to struct lbs_private
- * @param bss  BSS descriptor to check against the current driver settings
- * @param mode Network mode: Infrastructure or IBSS
- *
- * @return 1 if the network is compatible, 0 otherwise
- */
-static int is_network_compatible(struct lbs_private *priv,
- struct bss_descriptor *bss, uint8_t mode)
-{
- int matched = 0;
-
- lbs_deb_enter(LBS_DEB_SCAN);
-
- if (bss->mode != mode)
- goto done;
-
- matched = match_bss_no_security(&priv->secinfo, bss);
- if (matched)
- goto done;
- matched = match_bss_static_wep(&priv->secinfo, bss);
- if (matched)
- goto done;
- matched = match_bss_wpa(&priv->secinfo, bss);
- if (matched) {
- lbs_deb_scan("is_network_compatible() WPA: wpa_ie 0x%x "
- "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
- "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
- priv->secinfo.wep_enabled ? "e" : "d",
- priv->secinfo.WPAenabled ? "e" : "d",
- priv->secinfo.WPA2enabled ? "e" : "d",
- (bss->capability & WLAN_CAPABILITY_PRIVACY));
- goto done;
- }
- matched = match_bss_wpa2(&priv->secinfo, bss);
- if (matched) {
- lbs_deb_scan("is_network_compatible() WPA2: wpa_ie 0x%x "
- "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
- "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
- priv->secinfo.wep_enabled ? "e" : "d",
- priv->secinfo.WPAenabled ? "e" : "d",
- priv->secinfo.WPA2enabled ? "e" : "d",
- (bss->capability & WLAN_CAPABILITY_PRIVACY));
- goto done;
- }
- matched = match_bss_dynamic_wep(&priv->secinfo, bss);
- if (matched) {
- lbs_deb_scan("is_network_compatible() dynamic WEP: "
- "wpa_ie 0x%x wpa2_ie 0x%x privacy 0x%x\n",
- bss->wpa_ie[0], bss->rsn_ie[0],
- (bss->capability & WLAN_CAPABILITY_PRIVACY));
- goto done;
- }
-
- /* bss security settings don't match those configured on card */
- lbs_deb_scan("is_network_compatible() FAILED: wpa_ie 0x%x "
- "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s privacy 0x%x\n",
- bss->wpa_ie[0], bss->rsn_ie[0],
- priv->secinfo.wep_enabled ? "e" : "d",
- priv->secinfo.WPAenabled ? "e" : "d",
- priv->secinfo.WPA2enabled ? "e" : "d",
- (bss->capability & WLAN_CAPABILITY_PRIVACY));
-
-done:
- lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched);
- return matched;
-}
-
-/**
- * @brief This function finds a specific compatible BSSID in the scan list
- *
- * Used in association code
- *
- * @param priv A pointer to struct lbs_private
- * @param bssid BSSID to find in the scan list
- * @param mode Network mode: Infrastructure or IBSS
- *
- * @return pointer to the matching bss_descriptor, or NULL if not found
- */
-static struct bss_descriptor *lbs_find_bssid_in_list(struct lbs_private *priv,
- uint8_t *bssid, uint8_t mode)
-{
- struct bss_descriptor *iter_bss;
- struct bss_descriptor *found_bss = NULL;
-
- lbs_deb_enter(LBS_DEB_SCAN);
-
- if (!bssid)
- goto out;
-
- lbs_deb_hex(LBS_DEB_SCAN, "looking for", bssid, ETH_ALEN);
-
- /* Look through the scan table for a compatible match. The loop will
- * continue past a matched bssid that is not compatible in case there
- * is an AP with multiple SSIDs assigned to the same BSSID
- */
- mutex_lock(&priv->lock);
- list_for_each_entry(iter_bss, &priv->network_list, list) {
- if (compare_ether_addr(iter_bss->bssid, bssid))
- continue; /* bssid doesn't match */
- switch (mode) {
- case IW_MODE_INFRA:
- case IW_MODE_ADHOC:
- if (!is_network_compatible(priv, iter_bss, mode))
- break;
- found_bss = iter_bss;
- break;
- default:
- found_bss = iter_bss;
- break;
- }
- }
- mutex_unlock(&priv->lock);
-
-out:
- lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
- return found_bss;
-}
-
-/**
- * @brief This function finds an SSID in the scan list.
- *
- * Used in association code
- *
- * @param priv A pointer to struct lbs_private
- * @param ssid SSID to find in the list
- * @param ssid_len Length of the SSID
- * @param bssid BSSID to qualify the SSID selection (if provided)
- * @param mode Network mode: Infrastructure or IBSS
- * @param channel Channel to qualify the SSID selection (ignored if <= 0)
- *
- * @return pointer to the matching bss_descriptor, or NULL if not found
- */
-static struct bss_descriptor *lbs_find_ssid_in_list(struct lbs_private *priv,
- uint8_t *ssid, uint8_t ssid_len,
- uint8_t *bssid, uint8_t mode,
- int channel)
-{
- u32 bestrssi = 0;
- struct bss_descriptor *iter_bss = NULL;
- struct bss_descriptor *found_bss = NULL;
- struct bss_descriptor *tmp_oldest = NULL;
-
- lbs_deb_enter(LBS_DEB_SCAN);
-
- mutex_lock(&priv->lock);
-
- list_for_each_entry(iter_bss, &priv->network_list, list) {
- if (!tmp_oldest ||
- (iter_bss->last_scanned < tmp_oldest->last_scanned))
- tmp_oldest = iter_bss;
-
- if (lbs_ssid_cmp(iter_bss->ssid, iter_bss->ssid_len,
- ssid, ssid_len) != 0)
- continue; /* ssid doesn't match */
- if (bssid && compare_ether_addr(iter_bss->bssid, bssid) != 0)
- continue; /* bssid doesn't match */
- if ((channel > 0) && (iter_bss->channel != channel))
- continue; /* channel doesn't match */
-
- switch (mode) {
- case IW_MODE_INFRA:
- case IW_MODE_ADHOC:
- if (!is_network_compatible(priv, iter_bss, mode))
- break;
-
- if (bssid) {
- /* Found requested BSSID */
- found_bss = iter_bss;
- goto out;
- }
-
- if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
- bestrssi = SCAN_RSSI(iter_bss->rssi);
- found_bss = iter_bss;
- }
- break;
- case IW_MODE_AUTO:
- default:
- if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
- bestrssi = SCAN_RSSI(iter_bss->rssi);
- found_bss = iter_bss;
- }
- break;
- }
- }
-
-out:
- mutex_unlock(&priv->lock);
- lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
- return found_bss;
-}
-
-static int assoc_helper_essid(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0;
- struct bss_descriptor * bss;
- int channel = -1;
- DECLARE_SSID_BUF(ssid);
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- /* FIXME: take channel into account when picking SSIDs if a channel
- * is set.
- */
-
- if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
- channel = assoc_req->channel;
-
- lbs_deb_assoc("SSID '%s' requested\n",
- print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len));
- if (assoc_req->mode == IW_MODE_INFRA) {
- lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
- assoc_req->ssid_len);
-
- bss = lbs_find_ssid_in_list(priv, assoc_req->ssid,
- assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel);
- if (bss != NULL) {
- memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
- ret = lbs_try_associate(priv, assoc_req);
- } else {
- lbs_deb_assoc("SSID not found; cannot associate\n");
- }
- } else if (assoc_req->mode == IW_MODE_ADHOC) {
- /* Scan for the network, do not save previous results. Stale
- * scan data will cause us to join a non-existent adhoc network
- */
- lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
- assoc_req->ssid_len);
-
- /* Search for the requested SSID in the scan table */
- bss = lbs_find_ssid_in_list(priv, assoc_req->ssid,
- assoc_req->ssid_len, NULL, IW_MODE_ADHOC, channel);
- if (bss != NULL) {
- lbs_deb_assoc("SSID found, will join\n");
- memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
- lbs_adhoc_join(priv, assoc_req);
- } else {
- /* else send START command */
- lbs_deb_assoc("SSID not found, creating adhoc network\n");
- memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
- IEEE80211_MAX_SSID_LEN);
- assoc_req->bss.ssid_len = assoc_req->ssid_len;
- lbs_adhoc_start(priv, assoc_req);
- }
- }
-
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-
-static int assoc_helper_bssid(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0;
- struct bss_descriptor * bss;
-
- lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %pM", assoc_req->bssid);
-
- /* Search for index position in list for requested MAC */
- bss = lbs_find_bssid_in_list(priv, assoc_req->bssid,
- assoc_req->mode);
- if (bss == NULL) {
- lbs_deb_assoc("ASSOC: WAP: BSSID %pM not found, "
- "cannot associate.\n", assoc_req->bssid);
- goto out;
- }
-
- memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
- if (assoc_req->mode == IW_MODE_INFRA) {
- ret = lbs_try_associate(priv, assoc_req);
- lbs_deb_assoc("ASSOC: lbs_try_associate(bssid) returned %d\n",
- ret);
- } else if (assoc_req->mode == IW_MODE_ADHOC) {
- lbs_adhoc_join(priv, assoc_req);
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-
-static int assoc_helper_associate(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0, done = 0;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- /* If we're given an 'any' BSSID, try associating based on SSID */
-
- if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(bssid_any, assoc_req->bssid) &&
- compare_ether_addr(bssid_off, assoc_req->bssid)) {
- ret = assoc_helper_bssid(priv, assoc_req);
- done = 1;
- }
- }
-
- if (!done && test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
- ret = assoc_helper_essid(priv, assoc_req);
- }
-
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-
-static int assoc_helper_mode(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- if (assoc_req->mode == priv->mode)
- goto done;
-
- if (assoc_req->mode == IW_MODE_INFRA) {
- if (priv->psstate != PS_STATE_FULL_POWER)
- lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
- priv->psmode = LBS802_11POWERMODECAM;
- }
-
- priv->mode = assoc_req->mode;
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE,
- assoc_req->mode == IW_MODE_ADHOC ? 2 : 1);
-
-done:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-static int assoc_helper_channel(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- ret = lbs_update_channel(priv);
- if (ret) {
- lbs_deb_assoc("ASSOC: channel: error getting channel.\n");
- goto done;
- }
-
- if (assoc_req->channel == priv->channel)
- goto done;
-
- if (priv->mesh_dev) {
- /* Change mesh channel first; 21.p21 firmware won't let
- you change the channel otherwise (even though it will
- return an error for this) */
- lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP,
- assoc_req->channel);
- }
-
- lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
- priv->channel, assoc_req->channel);
-
- ret = lbs_set_channel(priv, assoc_req->channel);
- if (ret < 0)
- lbs_deb_assoc("ASSOC: channel: error setting channel.\n");
-
- /* FIXME: shouldn't need to grab the channel _again_ after setting
- * it since the firmware is supposed to return the new channel, but
- * whatever... */
- ret = lbs_update_channel(priv);
- if (ret) {
- lbs_deb_assoc("ASSOC: channel: error getting channel.\n");
- goto done;
- }
-
- if (assoc_req->channel != priv->channel) {
- lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n",
- assoc_req->channel);
- goto restore_mesh;
- }
-
- if (assoc_req->secinfo.wep_enabled &&
- (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
- assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) {
- /* Make sure WEP keys are re-sent to firmware */
- set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
- }
-
- /* Must restart/rejoin adhoc networks after channel change */
- set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
-
- restore_mesh:
- if (priv->mesh_dev)
- lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
- priv->channel);
-
- done:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-
-static int assoc_helper_wep_keys(struct lbs_private *priv,
- struct assoc_request *assoc_req)
-{
- int i;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- /* Set or remove WEP keys */
- if (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
- assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)
- ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_ADD, assoc_req);
- else
- ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_REMOVE, assoc_req);
-
- if (ret)
- goto out;
-
- /* enable/disable the MAC's WEP packet filter */
- if (assoc_req->secinfo.wep_enabled)
- priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE;
- else
- priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE;
-
- lbs_set_mac_control(priv);
-
- mutex_lock(&priv->lock);
-
- /* Copy WEP keys into priv wep key fields */
- for (i = 0; i < 4; i++) {
- memcpy(&priv->wep_keys[i], &assoc_req->wep_keys[i],
- sizeof(struct enc_key));
- }
- priv->wep_tx_keyidx = assoc_req->wep_tx_keyidx;
-
- mutex_unlock(&priv->lock);
-
-out:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-static int assoc_helper_secinfo(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0;
- uint16_t do_wpa;
- uint16_t rsn = 0;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- memcpy(&priv->secinfo, &assoc_req->secinfo,
- sizeof(struct lbs_802_11_security));
-
- lbs_set_mac_control(priv);
-
- /* If RSN is already enabled, don't try to enable it again, since
- * ENABLE_RSN resets internal state machines and will clobber the
- * 4-way WPA handshake.
- */
-
- /* Get RSN enabled/disabled */
- ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_GET, &rsn);
- if (ret) {
- lbs_deb_assoc("Failed to get RSN status: %d\n", ret);
- goto out;
- }
-
- /* Don't re-enable RSN if it's already enabled */
- do_wpa = assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled;
- if (do_wpa == rsn)
- goto out;
-
- /* Set RSN enabled/disabled */
- ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_SET, &do_wpa);
-
-out:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-
-static int assoc_helper_wpa_keys(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0;
- unsigned int flags = assoc_req->flags;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- /* Work around older firmware bug where WPA unicast and multicast
- * keys must be set independently. Seen in SDIO parts with firmware
- * version 5.0.11p0.
- */
-
- if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
- clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
- ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req);
- assoc_req->flags = flags;
- }
-
- if (ret)
- goto out;
-
- memcpy(&priv->wpa_unicast_key, &assoc_req->wpa_unicast_key,
- sizeof(struct enc_key));
-
- if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) {
- clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
-
- ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req);
- assoc_req->flags = flags;
-
- memcpy(&priv->wpa_mcast_key, &assoc_req->wpa_mcast_key,
- sizeof(struct enc_key));
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-
-static int assoc_helper_wpa_ie(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
- memcpy(&priv->wpa_ie, &assoc_req->wpa_ie, assoc_req->wpa_ie_len);
- priv->wpa_ie_len = assoc_req->wpa_ie_len;
- } else {
- memset(&priv->wpa_ie, 0, MAX_WPA_IE_LEN);
- priv->wpa_ie_len = 0;
- }
-
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-
-static int should_deauth_infrastructure(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- int ret = 0;
-
- if (priv->connect_status != LBS_CONNECTED)
- return 0;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
- if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
- lbs_deb_assoc("Deauthenticating due to new SSID\n");
- ret = 1;
- goto out;
- }
-
- if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
- if (priv->secinfo.auth_mode != assoc_req->secinfo.auth_mode) {
- lbs_deb_assoc("Deauthenticating due to new security\n");
- ret = 1;
- goto out;
- }
- }
-
- if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- lbs_deb_assoc("Deauthenticating due to new BSSID\n");
- ret = 1;
- goto out;
- }
-
- if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
- lbs_deb_assoc("Deauthenticating due to channel switch\n");
- ret = 1;
- goto out;
- }
-
- /* FIXME: deal with 'auto' mode somehow */
- if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
- if (assoc_req->mode != IW_MODE_INFRA) {
- lbs_deb_assoc("Deauthenticating due to leaving "
- "infra mode\n");
- ret = 1;
- goto out;
- }
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
- return ret;
-}
-
-
-static int should_stop_adhoc(struct lbs_private *priv,
- struct assoc_request * assoc_req)
-{
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- if (priv->connect_status != LBS_CONNECTED)
- return 0;
-
- if (lbs_ssid_cmp(priv->curbssparams.ssid,
- priv->curbssparams.ssid_len,
- assoc_req->ssid, assoc_req->ssid_len) != 0)
- return 1;
-
- /* FIXME: deal with 'auto' mode somehow */
- if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
- if (assoc_req->mode != IW_MODE_ADHOC)
- return 1;
- }
-
- if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
- if (assoc_req->channel != priv->channel)
- return 1;
- }
-
- lbs_deb_leave(LBS_DEB_ASSOC);
- return 0;
-}
-
-
-/**
- * @brief This function finds the best SSID in the Scan List
- *
- * Search the scan table for the best SSID that also matches the current
- * adapter network preference (infrastructure or adhoc)
- *
- * @param priv A pointer to struct lbs_private
- *
- * @return pointer to the best matching bss_descriptor, or NULL if none found
- */
-static struct bss_descriptor *lbs_find_best_ssid_in_list(
- struct lbs_private *priv, uint8_t mode)
-{
- uint8_t bestrssi = 0;
- struct bss_descriptor *iter_bss;
- struct bss_descriptor *best_bss = NULL;
-
- lbs_deb_enter(LBS_DEB_SCAN);
-
- mutex_lock(&priv->lock);
-
- list_for_each_entry(iter_bss, &priv->network_list, list) {
- switch (mode) {
- case IW_MODE_INFRA:
- case IW_MODE_ADHOC:
- if (!is_network_compatible(priv, iter_bss, mode))
- break;
- if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
- break;
- bestrssi = SCAN_RSSI(iter_bss->rssi);
- best_bss = iter_bss;
- break;
- case IW_MODE_AUTO:
- default:
- if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
- break;
- bestrssi = SCAN_RSSI(iter_bss->rssi);
- best_bss = iter_bss;
- break;
- }
- }
-
- mutex_unlock(&priv->lock);
- lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss);
- return best_bss;
-}
-
-/**
- * @brief Find the best AP
- *
- * Used from association worker.
- *
- * @param priv A pointer to struct lbs_private structure
- * @param out_ssid Buffer that receives the SSID of the best network
- * @param out_ssid_len Receives the length of the returned SSID
- * @param preferred_mode Preferred network mode (infrastructure or IBSS)
- * @param out_mode Receives the mode of the chosen network
- *
- * @return 0 on success, otherwise fail
- */
-static int lbs_find_best_network_ssid(struct lbs_private *priv,
- uint8_t *out_ssid, uint8_t *out_ssid_len, uint8_t preferred_mode,
- uint8_t *out_mode)
-{
- int ret = -1;
- struct bss_descriptor *found;
-
- lbs_deb_enter(LBS_DEB_SCAN);
-
- priv->scan_ssid_len = 0;
- lbs_scan_networks(priv, 1);
- if (priv->surpriseremoved)
- goto out;
-
- found = lbs_find_best_ssid_in_list(priv, preferred_mode);
- if (found && (found->ssid_len > 0)) {
- memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN);
- *out_ssid_len = found->ssid_len;
- *out_mode = found->mode;
- ret = 0;
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
- return ret;
-}
-
-
-void lbs_association_worker(struct work_struct *work)
-{
- struct lbs_private *priv = container_of(work, struct lbs_private,
- assoc_work.work);
- struct assoc_request * assoc_req = NULL;
- int ret = 0;
- int find_any_ssid = 0;
- DECLARE_SSID_BUF(ssid);
-
- lbs_deb_enter(LBS_DEB_ASSOC);
-
- mutex_lock(&priv->lock);
- assoc_req = priv->pending_assoc_req;
- priv->pending_assoc_req = NULL;
- priv->in_progress_assoc_req = assoc_req;
- mutex_unlock(&priv->lock);
-
- if (!assoc_req)
- goto done;
-
- lbs_deb_assoc(
- "Association Request:\n"
- " flags: 0x%08lx\n"
- " SSID: '%s'\n"
- " chann: %d\n"
- " band: %d\n"
- " mode: %d\n"
- " BSSID: %pM\n"
- " secinfo: %s%s%s\n"
- " auth_mode: %d\n",
- assoc_req->flags,
- print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len),
- assoc_req->channel, assoc_req->band, assoc_req->mode,
- assoc_req->bssid,
- assoc_req->secinfo.WPAenabled ? " WPA" : "",
- assoc_req->secinfo.WPA2enabled ? " WPA2" : "",
- assoc_req->secinfo.wep_enabled ? " WEP" : "",
- assoc_req->secinfo.auth_mode);
-
- /* If 'any' SSID was specified, find an SSID to associate with */
- if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) &&
- !assoc_req->ssid_len)
- find_any_ssid = 1;
-
- /* But don't use 'any' SSID if there's a valid locked BSSID to use */
- if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(assoc_req->bssid, bssid_any) &&
- compare_ether_addr(assoc_req->bssid, bssid_off))
- find_any_ssid = 0;
- }
-
- if (find_any_ssid) {
- u8 new_mode = assoc_req->mode;
-
- ret = lbs_find_best_network_ssid(priv, assoc_req->ssid,
- &assoc_req->ssid_len, assoc_req->mode, &new_mode);
- if (ret) {
- lbs_deb_assoc("Could not find best network\n");
- ret = -ENETUNREACH;
- goto out;
- }
-
- /* Ensure we switch to the mode of the AP */
- if (assoc_req->mode == IW_MODE_AUTO) {
- set_bit(ASSOC_FLAG_MODE, &assoc_req->flags);
- assoc_req->mode = new_mode;
- }
- }
-
- /*
- * Check if the attributes being changed require deauthentication
- * from the currently associated infrastructure access point.
- */
- if (priv->mode == IW_MODE_INFRA) {
- if (should_deauth_infrastructure(priv, assoc_req)) {
- ret = lbs_cmd_80211_deauthenticate(priv,
- priv->curbssparams.bssid,
- WLAN_REASON_DEAUTH_LEAVING);
- if (ret) {
- lbs_deb_assoc("Deauthentication due to new "
- "configuration request failed: %d\n",
- ret);
- }
- }
- } else if (priv->mode == IW_MODE_ADHOC) {
- if (should_stop_adhoc(priv, assoc_req)) {
- ret = lbs_adhoc_stop(priv);
- if (ret) {
- lbs_deb_assoc("Teardown of AdHoc network due to "
- "new configuration request failed: %d\n",
- ret);
- }
-
- }
- }
-
- /* Send the various configuration bits to the firmware */
- if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) {
- ret = assoc_helper_mode(priv, assoc_req);
- if (ret)
- goto out;
- }
-
- if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
- ret = assoc_helper_channel(priv, assoc_req);
- if (ret)
- goto out;
- }
-
- if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
- ret = assoc_helper_secinfo(priv, assoc_req);
- if (ret)
- goto out;
- }
-
- if (test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) {
- ret = assoc_helper_wpa_ie(priv, assoc_req);
- if (ret)
- goto out;
- }
-
- /*
- * v10 FW wants WPA keys to be set/cleared before WEP key operations,
- * otherwise it will fail to correctly associate to WEP networks.
- * Other firmware versions don't appear to care.
- */
- if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) ||
- test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
- ret = assoc_helper_wpa_keys(priv, assoc_req);
- if (ret)
- goto out;
- }
-
- if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) ||
- test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
- ret = assoc_helper_wep_keys(priv, assoc_req);
- if (ret)
- goto out;
- }
-
-
- /* SSID/BSSID should be the _last_ config option set, because they
- * trigger the association attempt.
- */
- if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) ||
- test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
- int success = 1;
-
- ret = assoc_helper_associate(priv, assoc_req);
- if (ret) {
- lbs_deb_assoc("ASSOC: association unsuccessful: %d\n",
- ret);
- success = 0;
- }
-
- if (priv->connect_status != LBS_CONNECTED) {
- lbs_deb_assoc("ASSOC: association unsuccessful, "
- "not connected\n");
- success = 0;
- }
-
- if (success) {
- lbs_deb_assoc("associated to %pM\n",
- priv->curbssparams.bssid);
- lbs_prepare_and_send_command(priv,
- CMD_802_11_RSSI,
- 0, CMD_OPTION_WAITFORRSP, 0, NULL);
- } else {
- ret = -1;
- }
- }
-
-out:
- if (ret) {
- lbs_deb_assoc("ASSOC: reconfiguration attempt unsuccessful: %d\n",
- ret);
- }
-
- mutex_lock(&priv->lock);
- priv->in_progress_assoc_req = NULL;
- mutex_unlock(&priv->lock);
- kfree(assoc_req);
-
-done:
- lbs_deb_leave(LBS_DEB_ASSOC);
-}
-
-
-/*
- * Caller MUST hold any necessary locks
- */
-struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
-{
- struct assoc_request * assoc_req;
-
- lbs_deb_enter(LBS_DEB_ASSOC);
- if (!priv->pending_assoc_req) {
- priv->pending_assoc_req = kzalloc(sizeof(struct assoc_request),
- GFP_KERNEL);
- if (!priv->pending_assoc_req) {
- lbs_pr_info("Not enough memory to allocate association"
- " request!\n");
- return NULL;
- }
- }
-
- /* Copy current configuration attributes to the association request,
- * but don't overwrite any that are already set.
- */
- assoc_req = priv->pending_assoc_req;
- if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
- memcpy(&assoc_req->ssid, &priv->curbssparams.ssid,
- IEEE80211_MAX_SSID_LEN);
- assoc_req->ssid_len = priv->curbssparams.ssid_len;
- }
-
- if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
- assoc_req->channel = priv->channel;
-
- if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags))
- assoc_req->band = priv->curbssparams.band;
-
- if (!test_bit(ASSOC_FLAG_MODE, &assoc_req->flags))
- assoc_req->mode = priv->mode;
-
- if (!test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- memcpy(&assoc_req->bssid, priv->curbssparams.bssid,
- ETH_ALEN);
- }
-
- if (!test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)) {
- int i;
- for (i = 0; i < 4; i++) {
- memcpy(&assoc_req->wep_keys[i], &priv->wep_keys[i],
- sizeof(struct enc_key));
- }
- }
-
- if (!test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags))
- assoc_req->wep_tx_keyidx = priv->wep_tx_keyidx;
-
- if (!test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) {
- memcpy(&assoc_req->wpa_mcast_key, &priv->wpa_mcast_key,
- sizeof(struct enc_key));
- }
-
- if (!test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
- memcpy(&assoc_req->wpa_unicast_key, &priv->wpa_unicast_key,
- sizeof(struct enc_key));
- }
-
- if (!test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
- memcpy(&assoc_req->secinfo, &priv->secinfo,
- sizeof(struct lbs_802_11_security));
- }
-
- if (!test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) {
- memcpy(&assoc_req->wpa_ie, &priv->wpa_ie,
- MAX_WPA_IE_LEN);
- assoc_req->wpa_ie_len = priv->wpa_ie_len;
- }
-
- lbs_deb_leave(LBS_DEB_ASSOC);
- return assoc_req;
-}
-
-
-/**
- * @brief Deauthenticate from a specific BSS
- *
- * @param priv A pointer to struct lbs_private structure
- * @param bssid The specific BSS to deauthenticate from
- * @param reason The 802.11 sec. 7.3.1.7 Reason Code for deauthenticating
- *
- * @return 0 on success, error on failure
- */
-int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN],
- u16 reason)
-{
- struct cmd_ds_802_11_deauthenticate cmd;
- int ret;
-
- lbs_deb_enter(LBS_DEB_JOIN);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- memcpy(cmd.macaddr, &bssid[0], ETH_ALEN);
- cmd.reasoncode = cpu_to_le16(reason);
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd);
-
- /* Clean up everything even if there was an error; can't assume that
- * we're still authenticated to the AP after trying to deauth.
- */
- lbs_mac_event_disconnected(priv);
-
- lbs_deb_leave(LBS_DEB_JOIN);
- return ret;
-}
-
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
deleted file mode 100644
index 40621b7..0000000
--- a/drivers/net/wireless/libertas/assoc.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* Copyright (C) 2006, Red Hat, Inc. */
-
-#ifndef _LBS_ASSOC_H_
-#define _LBS_ASSOC_H_
-
-
-#include "defs.h"
-#include "host.h"
-
-
-struct lbs_private;
-
-/*
- * In theory, the IE is limited to the IE length, 255,
- * but in practice 64 bytes are enough.
- */
-#define MAX_WPA_IE_LEN 64
-
-
-
-struct lbs_802_11_security {
- u8 WPAenabled;
- u8 WPA2enabled;
- u8 wep_enabled;
- u8 auth_mode;
- u32 key_mgmt;
-};
-
-/** Current Basic Service Set State Structure */
-struct current_bss_params {
- /** bssid */
- u8 bssid[ETH_ALEN];
- /** ssid */
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 ssid_len;
-
- /** band */
- u8 band;
- /** channel is directly in priv->channel */
- /** zero-terminated array of supported data rates */
- u8 rates[MAX_RATES + 1];
-};
-
-/**
- * @brief Structure used to store information for each beacon/probe response
- */
-struct bss_descriptor {
- u8 bssid[ETH_ALEN];
-
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 ssid_len;
-
- u16 capability;
- u32 rssi;
- u32 channel;
- u16 beaconperiod;
- __le16 atimwindow;
-
- /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
- u8 mode;
-
- /* zero-terminated array of supported data rates */
- u8 rates[MAX_RATES + 1];
-
- unsigned long last_scanned;
-
- union ieee_phy_param_set phy;
- union ieee_ss_param_set ss;
-
- u8 wpa_ie[MAX_WPA_IE_LEN];
- size_t wpa_ie_len;
- u8 rsn_ie[MAX_WPA_IE_LEN];
- size_t rsn_ie_len;
-
- u8 mesh;
-
- struct list_head list;
-};
-
-/** Association request
- *
- * Encapsulates all the options that describe a specific association request
- * or configuration of the wireless card's radio, mode, and security settings.
- */
-struct assoc_request {
-#define ASSOC_FLAG_SSID 1
-#define ASSOC_FLAG_CHANNEL 2
-#define ASSOC_FLAG_BAND 3
-#define ASSOC_FLAG_MODE 4
-#define ASSOC_FLAG_BSSID 5
-#define ASSOC_FLAG_WEP_KEYS 6
-#define ASSOC_FLAG_WEP_TX_KEYIDX 7
-#define ASSOC_FLAG_WPA_MCAST_KEY 8
-#define ASSOC_FLAG_WPA_UCAST_KEY 9
-#define ASSOC_FLAG_SECINFO 10
-#define ASSOC_FLAG_WPA_IE 11
- unsigned long flags;
-
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 ssid_len;
- u8 channel;
- u8 band;
- u8 mode;
- u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
-
- /** WEP keys */
- struct enc_key wep_keys[4];
- u16 wep_tx_keyidx;
-
- /** WPA keys */
- struct enc_key wpa_mcast_key;
- struct enc_key wpa_unicast_key;
-
- struct lbs_802_11_security secinfo;
-
- /** WPA Information Elements*/
- u8 wpa_ie[MAX_WPA_IE_LEN];
- u8 wpa_ie_len;
-
- /* BSS to associate with for infrastructure or Ad-Hoc join */
- struct bss_descriptor bss;
-};
-
-
-extern u8 lbs_bg_rates[MAX_RATES];
-
-void lbs_association_worker(struct work_struct *work);
-struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
-
-int lbs_adhoc_stop(struct lbs_private *priv);
-
-int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
- u8 bssid[ETH_ALEN], u16 reason);
-
-int lbs_cmd_802_11_rssi(struct lbs_private *priv,
- struct cmd_ds_command *cmd);
-int lbs_ret_802_11_rssi(struct lbs_private *priv,
- struct cmd_ds_command *resp);
-
-int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
- struct cmd_ds_command *cmd,
- u16 cmd_action);
-int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
- struct cmd_ds_command *resp);
-
-int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
- struct assoc_request *assoc);
-
-int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
- uint16_t *enable);
-
-int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
- struct assoc_request *assoc);
-
-#endif /* _LBS_ASSOC_H_ */
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 9d5d3cc..f36cc97 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -7,8 +7,12 @@
*/
#include <linux/slab.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
#include <net/cfg80211.h>
+#include <asm/unaligned.h>
+#include "decl.h"
#include "cfg.h"
#include "cmd.h"
@@ -39,26 +43,27 @@ static struct ieee80211_channel lbs_2ghz_channels[] = {
CHAN2G(14, 2484, 0),
};
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
- .bitrate = (_rate), \
- .hw_value = (_rateid), \
- .flags = (_flags), \
+#define RATETAB_ENT(_rate, _hw_value, _flags) { \
+ .bitrate = (_rate), \
+ .hw_value = (_hw_value), \
+ .flags = (_flags), \
}
+/* Table 6 in section 3.2.1.1 */
static struct ieee80211_rate lbs_rates[] = {
- RATETAB_ENT(10, 0x1, 0),
- RATETAB_ENT(20, 0x2, 0),
- RATETAB_ENT(55, 0x4, 0),
- RATETAB_ENT(110, 0x8, 0),
- RATETAB_ENT(60, 0x10, 0),
- RATETAB_ENT(90, 0x20, 0),
- RATETAB_ENT(120, 0x40, 0),
- RATETAB_ENT(180, 0x80, 0),
- RATETAB_ENT(240, 0x100, 0),
- RATETAB_ENT(360, 0x200, 0),
- RATETAB_ENT(480, 0x400, 0),
- RATETAB_ENT(540, 0x800, 0),
+ RATETAB_ENT(10, 0, 0),
+ RATETAB_ENT(20, 1, 0),
+ RATETAB_ENT(55, 2, 0),
+ RATETAB_ENT(110, 3, 0),
+ RATETAB_ENT(60, 9, 0),
+ RATETAB_ENT(90, 6, 0),
+ RATETAB_ENT(120, 7, 0),
+ RATETAB_ENT(180, 8, 0),
+ RATETAB_ENT(240, 9, 0),
+ RATETAB_ENT(360, 10, 0),
+ RATETAB_ENT(480, 11, 0),
+ RATETAB_ENT(540, 12, 0),
};
static struct ieee80211_supported_band lbs_band_2ghz = {
@@ -76,22 +81,639 @@ static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_CCMP,
};
+/* Time to stay on the channel */
+#define LBS_DWELL_PASSIVE 100
+#define LBS_DWELL_ACTIVE 40
+/***************************************************************************
+ * Misc utility functions
+ *
+ * TLVs are Marvell specific. They are very similar to IEs, they have the
+ * same structure: type, length, data*. The only difference: for IEs, the
+ * type and length are u8, but for TLVs they're __le16.
+ */
+
+/*
+ * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
+ * in the firmware spec
+ */
+static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
+{
+ int ret = -ENOTSUPP;
+
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ ret = auth_type;
+ break;
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ ret = NL80211_AUTHTYPE_OPEN_SYSTEM;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ ret = 0x80;
+ break;
+ default:
+ /* silence compiler */
+ break;
+ }
+ return ret;
+}
+
+
+/* Various firmware commands need the list of supported rates, but with
+ the high bit set for basic rates */
+static int lbs_add_rates(u8 *rates)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
+ u8 rate = lbs_rates[i].bitrate / 5;
+ if (rate == 0x02 || rate == 0x04 ||
+ rate == 0x0b || rate == 0x16)
+ rate |= 0x80;
+ rates[i] = rate;
+ }
+ return ARRAY_SIZE(lbs_rates);
+}
+
+
+/***************************************************************************
+ * TLV utility functions
+ *
+ * TLVs are Marvell specific. They are very similar to IEs, they have the
+ * same structure: type, length, data*. The only difference: for IEs, the
+ * type and length are u8, but for TLVs they're __le16.
+ */
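Editorial note: the TLV helpers below all build on a common header struct that lives in the driver's types.h rather than in this patch. The assumed layout, mirroring an IE header but with little-endian 16-bit fields as described above, is roughly:

/* Assumed layout (from the driver's types.h, not part of this patch):
 * the same shape as an IE header, but type and length are __le16.
 */
struct mrvl_ie_header {
	__le16 type;
	__le16 len;
} __packed;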
+
+
+/*
+ * Add ssid TLV
+ */
+#define LBS_MAX_SSID_TLV_SIZE \
+ (sizeof(struct mrvl_ie_header) \
+ + IEEE80211_MAX_SSID_LEN)
+
+static int lbs_add_ssid_tlv(u8 *tlv, const u8 *ssid, int ssid_len)
+{
+ struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv;
+
+ /*
+ * TLV-ID SSID 00 00
+ * length 06 00
+ * ssid 4d 4e 54 45 53 54
+ */
+ ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
+ ssid_tlv->header.len = cpu_to_le16(ssid_len);
+ memcpy(ssid_tlv->ssid, ssid, ssid_len);
+ return sizeof(ssid_tlv->header) + ssid_len;
+}
+
+
+/*
+ * Add channel list TLV (section 8.4.2)
+ *
+ * Actual channel data comes from priv->wdev->wiphy->channels.
+ */
+#define LBS_MAX_CHANNEL_LIST_TLV_SIZE \
+ (sizeof(struct mrvl_ie_header) \
+ + (LBS_SCAN_BEFORE_NAP * sizeof(struct chanscanparamset)))
+
+static int lbs_add_channel_list_tlv(struct lbs_private *priv, u8 *tlv,
+ int last_channel, int active_scan)
+{
+ int chanscanparamsize = sizeof(struct chanscanparamset) *
+ (last_channel - priv->scan_channel);
+
+ struct mrvl_ie_header *header = (void *) tlv;
+
+ /*
+ * TLV-ID CHANLIST 01 01
+ * length 0e 00
+ * channel 00 01 00 00 00 64 00
+ * radio type 00
+ * channel 01
+ * scan type 00
+ * min scan time 00 00
+ * max scan time 64 00
+ * channel 2 00 02 00 00 00 64 00
+ *
+ */
+
+ header->type = cpu_to_le16(TLV_TYPE_CHANLIST);
+ header->len = cpu_to_le16(chanscanparamsize);
+ tlv += sizeof(struct mrvl_ie_header);
+
+ /* lbs_deb_scan("scan: channels %d to %d\n", priv->scan_channel,
+ last_channel); */
+ memset(tlv, 0, chanscanparamsize);
+
+ while (priv->scan_channel < last_channel) {
+ struct chanscanparamset *param = (void *) tlv;
+
+ param->radiotype = CMD_SCAN_RADIO_TYPE_BG;
+ param->channumber =
+ priv->scan_req->channels[priv->scan_channel]->hw_value;
+ if (active_scan) {
+ param->maxscantime = cpu_to_le16(LBS_DWELL_ACTIVE);
+ } else {
+ param->chanscanmode.passivescan = 1;
+ param->maxscantime = cpu_to_le16(LBS_DWELL_PASSIVE);
+ }
+ tlv += sizeof(struct chanscanparamset);
+ priv->scan_channel++;
+ }
+ return sizeof(struct mrvl_ie_header) + chanscanparamsize;
+}
+
+
+/*
+ * Add rates TLV
+ *
+ * The rates are in lbs_bg_rates[], but for the 802.11b
+ * rates the high bit is set. We add this TLV only because
+ * there's a firmware which otherwise doesn't report all
+ * APs in range.
+ */
+#define LBS_MAX_RATES_TLV_SIZE \
+ (sizeof(struct mrvl_ie_header) \
+ + (ARRAY_SIZE(lbs_rates)))
+
+/* Adds a TLV with all rates the hardware supports */
+static int lbs_add_supported_rates_tlv(u8 *tlv)
+{
+ size_t i;
+ struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
+
+ /*
+ * TLV-ID RATES 01 00
+ * length 0e 00
+ * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c
+ */
+ rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
+ tlv += sizeof(rate_tlv->header);
+ i = lbs_add_rates(tlv);
+ tlv += i;
+ rate_tlv->header.len = cpu_to_le16(i);
+ return sizeof(rate_tlv->header) + i;
+}
+
+
+/*
+ * Adds a TLV with all rates the hardware *and* BSS supports.
+ */
+static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss)
+{
+ struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
+ const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
+ int n;
+
+ /*
+ * 01 00 TLV_TYPE_RATES
+ * 04 00 len
+ * 82 84 8b 96 rates
+ */
+ rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
+ tlv += sizeof(rate_tlv->header);
+
+ if (!rates_eid) {
+ /* Fallback: add basic 802.11b rates */
+ *tlv++ = 0x82;
+ *tlv++ = 0x84;
+ *tlv++ = 0x8b;
+ *tlv++ = 0x96;
+ n = 4;
+ } else {
+ int hw, ap;
+ u8 ap_max = rates_eid[1];
+ n = 0;
+ for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
+ u8 hw_rate = lbs_rates[hw].bitrate / 5;
+ for (ap = 0; ap < ap_max; ap++) {
+ if (hw_rate == (rates_eid[ap+2] & 0x7f)) {
+ *tlv++ = rates_eid[ap+2];
+ n++;
+ }
+ }
+ }
+ }
+
+ rate_tlv->header.len = cpu_to_le16(n);
+ return sizeof(rate_tlv->header) + n;
+}
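As a worked example of the matching above: lbs_rates[] stores bitrates in units of 100 kbit/s, so 11 Mbit/s is stored as 110 and 110 / 5 = 22 = 0x16, which equals the AP's rate byte 0x96 once the basic-rate flag is masked off with & 0x7f; the matching byte is then copied into the TLV with that flag preserved.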
+
+
+/*
+ * Add auth type TLV.
+ *
+ * This is only needed for newer firmware (V9 and up).
+ */
+#define LBS_MAX_AUTH_TYPE_TLV_SIZE \
+ sizeof(struct mrvl_ie_auth_type)
+
+static int lbs_add_auth_type_tlv(u8 *tlv, enum nl80211_auth_type auth_type)
+{
+ struct mrvl_ie_auth_type *auth = (void *) tlv;
+
+ /*
+ * 1f 01 TLV_TYPE_AUTH_TYPE
+ * 01 00 len
+ * 01 auth type
+ */
+ auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+ auth->header.len = cpu_to_le16(sizeof(*auth)-sizeof(auth->header));
+ auth->auth = cpu_to_le16(lbs_auth_to_authtype(auth_type));
+ return sizeof(*auth);
+}
+
+
+/*
+ * Add channel (phy ds) TLV
+ */
+#define LBS_MAX_CHANNEL_TLV_SIZE \
+ sizeof(struct mrvl_ie_header)
+
+static int lbs_add_channel_tlv(u8 *tlv, u8 channel)
+{
+ struct mrvl_ie_ds_param_set *ds = (void *) tlv;
+
+ /*
+ * 03 00 TLV_TYPE_PHY_DS
+ * 01 00 len
+ * 06 channel
+ */
+ ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
+ ds->header.len = cpu_to_le16(sizeof(*ds)-sizeof(ds->header));
+ ds->channel = channel;
+ return sizeof(*ds);
+}
+
+
+/*
+ * Add (empty) CF param TLV of the form:
+ */
+#define LBS_MAX_CF_PARAM_TLV_SIZE \
+ sizeof(struct mrvl_ie_header)
+
+static int lbs_add_cf_param_tlv(u8 *tlv)
+{
+ struct mrvl_ie_cf_param_set *cf = (void *)tlv;
+
+ /*
+ * 04 00 TLV_TYPE_CF
+ * 06 00 len
+ * 00 cfpcnt
+ * 00 cfpperiod
+ * 00 00 cfpmaxduration
+ * 00 00 cfpdurationremaining
+ */
+ cf->header.type = cpu_to_le16(TLV_TYPE_CF);
+ cf->header.len = cpu_to_le16(sizeof(*cf)-sizeof(cf->header));
+ return sizeof(*cf);
+}
+
+/*
+ * Add WPA TLV
+ */
+#define LBS_MAX_WPA_TLV_SIZE \
+ (sizeof(struct mrvl_ie_header) \
+ + 128 /* TODO: I guessed the size */)
+
+static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
+{
+ size_t tlv_len;
+
+ /*
+	 * We just need to convert an IE to a TLV. IEs use u8 for the header,
+ * u8 type
+ * u8 len
+ * u8[] data
+ * but TLVs use __le16 instead:
+ * __le16 type
+ * __le16 len
+ * u8[] data
+ */
+ *tlv++ = *ie++;
+ *tlv++ = 0;
+ tlv_len = *tlv++ = *ie++;
+ *tlv++ = 0;
+ while (tlv_len--)
+ *tlv++ = *ie++;
+ /* the TLV is two bytes larger than the IE */
+ return ie_len + 2;
+}
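For example, a WPA vendor IE that starts dd 16 00 50 f2 01 ... (ID 0xdd, length 0x16) is copied out as the TLV dd 00 16 00 00 50 f2 01 ..., i.e. each u8 header field is widened to a little-endian __le16, and the returned length is the original IE length plus the two extra header bytes.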
+
+/***************************************************************************
+ * Set Channel
+ */
+
static int lbs_cfg_set_channel(struct wiphy *wiphy,
struct net_device *netdev,
- struct ieee80211_channel *chan,
+ struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type)
{
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = -ENOTSUPP;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", chan->center_freq, channel_type);
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
+ channel->center_freq, channel_type);
if (channel_type != NL80211_CHAN_NO_HT)
goto out;
- ret = lbs_set_channel(priv, chan->hw_value);
+ ret = lbs_set_channel(priv, channel->hw_value);
+
+ out:
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+
+/***************************************************************************
+ * Scanning
+ */
+
+/*
+ * When scanning, the firmware doesn't send a NULL packet with the power-save
+ * bit set to the AP. So we cannot stay away from our current channel for too
+ * long, otherwise we lose data. So take a "nap" every few channels while
+ * scanning.
+ */
+#define LBS_SCAN_BEFORE_NAP 4
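A rough sketch of how this constant chunks a scan request (illustrative only, the helper name is made up; the real logic lives in lbs_scan_worker() below): an 11-channel request is scanned as 4 + 4 + 3 channels, with the delayed work re-queued between chunks.

	/* Illustrative helper mirroring the chunking done in lbs_scan_worker() */
	static int example_next_chunk_end(int scan_channel, int n_channels)
	{
		int last_channel = scan_channel + LBS_SCAN_BEFORE_NAP;

		if (last_channel > n_channels)
			last_channel = n_channels;
		return last_channel;	/* scan [scan_channel, last_channel) next */
	}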
+
+
+/*
+ * When the firmware reports back a scan result, it gives us a "u8 rssi",
+ * which isn't really an RSSI, as it becomes larger when moving away from
+ * the AP. Anyway, we need to convert that into mBm.
+ */
+#define LBS_SCAN_RSSI_TO_MBM(rssi) \
+ ((-(int)rssi + 3)*100)
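Worked example: a firmware-reported rssi of 48 gives (-48 + 3) * 100 = -4500 mBm, which the debug output below prints as -45 dBm after dividing by 100.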
+
+static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
+ struct cmd_header *resp)
+{
+ struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
+ int bsssize;
+ const u8 *pos;
+ u16 nr_sets;
+ const u8 *tsfdesc;
+ int tsfsize;
+ int i;
+ int ret = -EILSEQ;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ bsssize = get_unaligned_le16(&scanresp->bssdescriptsize);
+ nr_sets = le16_to_cpu(resp->size);
+
+ /*
+ * The general layout of the scan response is described in chapter
+ * 5.7.1. Basically we have a common part, then any number of BSS
+ * descriptor sections. Finally we have a section with the same number
+ * of TSFs.
+ *
+ * cmd_ds_802_11_scan_rsp
+ * cmd_header
+ * pos_size
+ * nr_sets
+ * bssdesc 1
+ * bssid
+ * rssi
+ * timestamp
+ * intvl
+ * capa
+ * IEs
+ * bssdesc 2
+ * bssdesc n
+ *   MrvlIEtypes_TsfTimestamp_t
+ * TSF for BSS 1
+ * TSF for BSS 2
+ * TSF for BSS n
+ */
+
+ pos = scanresp->bssdesc_and_tlvbuffer;
+
+ tsfdesc = pos + bsssize;
+ tsfsize = 4 + 8 * scanresp->nr_sets;
+
+ /* Validity check: we expect a Marvell-Local TLV */
+ i = get_unaligned_le16(tsfdesc);
+ tsfdesc += 2;
+ if (i != TLV_TYPE_TSFTIMESTAMP)
+ goto done;
+ /* Validity check: the TLV holds TSF values with 8 bytes each, so
+ * the size in the TLV must match the nr_sets value */
+ i = get_unaligned_le16(tsfdesc);
+ tsfdesc += 2;
+ if (i / 8 != scanresp->nr_sets)
+ goto done;
+
+ for (i = 0; i < scanresp->nr_sets; i++) {
+ const u8 *bssid;
+ const u8 *ie;
+ int left;
+ int ielen;
+ int rssi;
+ u16 intvl;
+ u16 capa;
+ int chan_no = -1;
+ const u8 *ssid = NULL;
+ u8 ssid_len = 0;
+ DECLARE_SSID_BUF(ssid_buf);
+
+ int len = get_unaligned_le16(pos);
+ pos += 2;
+
+ /* BSSID */
+ bssid = pos;
+ pos += ETH_ALEN;
+ /* RSSI */
+ rssi = *pos++;
+ /* Packet time stamp */
+ pos += 8;
+ /* Beacon interval */
+ intvl = get_unaligned_le16(pos);
+ pos += 2;
+ /* Capabilities */
+ capa = get_unaligned_le16(pos);
+ pos += 2;
+
+ /* To find out the channel, we must parse the IEs */
+ ie = pos;
+ /* 6+1+8+2+2: size of BSSID, RSSI, time stamp, beacon
+ interval, capabilities */
+ ielen = left = len - (6 + 1 + 8 + 2 + 2);
+ while (left >= 2) {
+ u8 id, elen;
+ id = *pos++;
+ elen = *pos++;
+ left -= 2;
+ if (elen > left || elen == 0)
+ goto done;
+ if (id == WLAN_EID_DS_PARAMS)
+ chan_no = *pos;
+ if (id == WLAN_EID_SSID) {
+ ssid = pos;
+ ssid_len = elen;
+ }
+ left -= elen;
+ pos += elen;
+ }
+
+ /* No channel, no luck */
+ if (chan_no != -1) {
+ struct wiphy *wiphy = priv->wdev->wiphy;
+ int freq = ieee80211_channel_to_frequency(chan_no);
+ struct ieee80211_channel *channel =
+ ieee80211_get_channel(wiphy, freq);
+
+ lbs_deb_scan("scan: %pM, capa %04x, chan %2d, %s, "
+ "%d dBm\n",
+ bssid, capa, chan_no,
+ print_ssid(ssid_buf, ssid, ssid_len),
+ LBS_SCAN_RSSI_TO_MBM(rssi)/100);
+
+		if (channel &&
+ !(channel->flags & IEEE80211_CHAN_DISABLED))
+ cfg80211_inform_bss(wiphy, channel,
+ bssid, le64_to_cpu(*(__le64 *)tsfdesc),
+ capa, intvl, ie, ielen,
+ LBS_SCAN_RSSI_TO_MBM(rssi),
+ GFP_KERNEL);
+ }
+ tsfdesc += 8;
+ }
+ ret = 0;
+
+ done:
+ lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
+ return ret;
+}
+
+
+/*
+ * Our scan command contains TLVs, consisting of an SSID TLV, a channel list
+ * TLV and a rates TLV. Determine their maximum size:
+ */
+#define LBS_SCAN_MAX_CMD_SIZE \
+ (sizeof(struct cmd_ds_802_11_scan) \
+ + LBS_MAX_SSID_TLV_SIZE \
+ + LBS_MAX_CHANNEL_LIST_TLV_SIZE \
+ + LBS_MAX_RATES_TLV_SIZE)
+
+/*
+ * Assumes priv->scan_req is initialized and valid
+ * Assumes priv->scan_channel is initialized
+ */
+static void lbs_scan_worker(struct work_struct *work)
+{
+ struct lbs_private *priv =
+ container_of(work, struct lbs_private, scan_work.work);
+ struct cmd_ds_802_11_scan *scan_cmd;
+ u8 *tlv; /* pointer into our current, growing TLV storage area */
+ int last_channel;
+ int running, carrier;
+
+ lbs_deb_enter(LBS_DEB_SCAN);
+
+ scan_cmd = kzalloc(LBS_SCAN_MAX_CMD_SIZE, GFP_KERNEL);
+ if (scan_cmd == NULL)
+ goto out_no_scan_cmd;
+
+ /* prepare fixed part of scan command */
+ scan_cmd->bsstype = CMD_BSS_TYPE_ANY;
+
+ /* stop network while we're away from our main channel */
+ running = !netif_queue_stopped(priv->dev);
+ carrier = netif_carrier_ok(priv->dev);
+ if (running)
+ netif_stop_queue(priv->dev);
+ if (carrier)
+ netif_carrier_off(priv->dev);
+
+ /* prepare fixed part of scan command */
+ tlv = scan_cmd->tlvbuffer;
+
+ /* add SSID TLV */
+ if (priv->scan_req->n_ssids)
+ tlv += lbs_add_ssid_tlv(tlv,
+ priv->scan_req->ssids[0].ssid,
+ priv->scan_req->ssids[0].ssid_len);
+
+ /* add channel TLVs */
+ last_channel = priv->scan_channel + LBS_SCAN_BEFORE_NAP;
+ if (last_channel > priv->scan_req->n_channels)
+ last_channel = priv->scan_req->n_channels;
+ tlv += lbs_add_channel_list_tlv(priv, tlv, last_channel,
+ priv->scan_req->n_ssids);
+
+ /* add rates TLV */
+ tlv += lbs_add_supported_rates_tlv(tlv);
+
+ if (priv->scan_channel < priv->scan_req->n_channels) {
+ cancel_delayed_work(&priv->scan_work);
+ queue_delayed_work(priv->work_thread, &priv->scan_work,
+ msecs_to_jiffies(300));
+ }
+
+ /* This is the final data we are about to send */
+ scan_cmd->hdr.size = cpu_to_le16(tlv - (u8 *)scan_cmd);
+ lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd,
+ sizeof(*scan_cmd));
+ lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer,
+ tlv - scan_cmd->tlvbuffer);
+
+ __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr,
+ le16_to_cpu(scan_cmd->hdr.size),
+ lbs_ret_scan, 0);
+
+ if (priv->scan_channel >= priv->scan_req->n_channels) {
+ /* Mark scan done */
+ cfg80211_scan_done(priv->scan_req, false);
+ priv->scan_req = NULL;
+ }
+
+ /* Restart network */
+ if (carrier)
+ netif_carrier_on(priv->dev);
+ if (running && !priv->tx_pending_len)
+ netif_wake_queue(priv->dev);
+
+ kfree(scan_cmd);
+
+ out_no_scan_cmd:
+ lbs_deb_leave(LBS_DEB_SCAN);
+}
+
+
+static int lbs_cfg_scan(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_scan_request *request)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ if (priv->scan_req || delayed_work_pending(&priv->scan_work)) {
+ /* old scan request not yet processed */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n",
+ request->n_ssids, request->n_channels, request->ie_len);
+
+ priv->scan_channel = 0;
+ queue_delayed_work(priv->work_thread, &priv->scan_work,
+ msecs_to_jiffies(50));
+
+ if (priv->surpriseremoved)
+ ret = -EIO;
+
+ priv->scan_req = request;
out:
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -101,8 +723,1228 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
+/***************************************************************************
+ * Events
+ */
+
+void lbs_send_disconnect_notification(struct lbs_private *priv)
+{
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ cfg80211_disconnected(priv->dev,
+ 0,
+ NULL, 0,
+ GFP_KERNEL);
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+}
+
+void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
+{
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ cfg80211_michael_mic_failure(priv->dev,
+ priv->assoc_bss,
+ event == MACREG_INT_CODE_MIC_ERR_MULTICAST ?
+ NL80211_KEYTYPE_GROUP :
+ NL80211_KEYTYPE_PAIRWISE,
+ -1,
+ NULL,
+ GFP_KERNEL);
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+}
+
+
+
+
+/***************************************************************************
+ * Connect/disconnect
+ */
+
+
+/*
+ * This removes all WEP keys
+ */
+static int lbs_remove_wep_keys(struct lbs_private *priv)
+{
+ struct cmd_ds_802_11_set_wep cmd;
+ int ret;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ cmd.keyindex = cpu_to_le16(priv->wep_tx_key);
+ cmd.action = cpu_to_le16(CMD_ACT_REMOVE);
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+ return ret;
+}
+
+/*
+ * Set WEP keys
+ */
+static int lbs_set_wep_keys(struct lbs_private *priv)
+{
+ struct cmd_ds_802_11_set_wep cmd;
+ int i;
+ int ret;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ /*
+ * command 13 00
+ * size 50 00
+ * sequence xx xx
+ * result 00 00
+ * action 02 00 ACT_ADD
+ * transmit key 00 00
+ * type for key 1 01 WEP40
+ * type for key 2 00
+ * type for key 3 00
+ * type for key 4 00
+ * key 1 39 39 39 39 39 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * key 2 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * key 3 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * key 4 00 00 00 00 00 00 00 00
+ */
+ if (priv->wep_key_len[0] || priv->wep_key_len[1] ||
+ priv->wep_key_len[2] || priv->wep_key_len[3]) {
+ /* Only set wep keys if we have at least one of them */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ cmd.keyindex = cpu_to_le16(priv->wep_tx_key);
+ cmd.action = cpu_to_le16(CMD_ACT_ADD);
+
+ for (i = 0; i < 4; i++) {
+ switch (priv->wep_key_len[i]) {
+ case WLAN_KEY_LEN_WEP40:
+ cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
+ break;
+ case WLAN_KEY_LEN_WEP104:
+ cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
+ break;
+ default:
+ cmd.keytype[i] = 0;
+ break;
+ }
+ memcpy(cmd.keymaterial[i], priv->wep_key[i],
+ priv->wep_key_len[i]);
+ }
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
+ } else {
+ /* Otherwise remove all wep keys */
+ ret = lbs_remove_wep_keys(priv);
+ }
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+ return ret;
+}
+
+
+/*
+ * Enable/Disable RSN status
+ */
+static int lbs_enable_rsn(struct lbs_private *priv, int enable)
+{
+ struct cmd_ds_802_11_enable_rsn cmd;
+ int ret;
+
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", enable);
+
+ /*
+ * cmd 2f 00
+ * size 0c 00
+ * sequence xx xx
+ * result 00 00
+ * action 01 00 ACT_SET
+ * enable 01 00
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ cmd.action = cpu_to_le16(CMD_ACT_SET);
+ cmd.enable = cpu_to_le16(enable);
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+ return ret;
+}
+
+
+/*
+ * Set WPA/WPA2 key material
+ */
+
+/* like "struct cmd_ds_802_11_key_material", but with cmd_header. Once we
+ * get rid of WEXT, this should go into host.h */
+
+struct cmd_key_material {
+ struct cmd_header hdr;
+
+ __le16 action;
+ struct MrvlIEtype_keyParamSet param;
+} __attribute__ ((packed));
+
+static int lbs_set_key_material(struct lbs_private *priv,
+ int key_type,
+ int key_info,
+ u8 *key, u16 key_len)
+{
+ struct cmd_key_material cmd;
+ int ret;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ /*
+ * Example for WPA (TKIP):
+ *
+ * cmd 5e 00
+ * size 34 00
+ * sequence xx xx
+ * result 00 00
+ * action 01 00
+ * TLV type 00 01 key param
+ * length 00 26
+ * key type 01 00 TKIP
+ * key info 06 00 UNICAST | ENABLED
+ * key len 20 00
+ * key 32 bytes
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ cmd.action = cpu_to_le16(CMD_ACT_SET);
+ cmd.param.type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
+ cmd.param.length = cpu_to_le16(sizeof(cmd.param) - 4);
+ cmd.param.keytypeid = cpu_to_le16(key_type);
+ cmd.param.keyinfo = cpu_to_le16(key_info);
+ cmd.param.keylen = cpu_to_le16(key_len);
+ if (key && key_len)
+ memcpy(cmd.param.key, key, key_len);
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+ return ret;
+}
+
+
+/*
+ * Sets the auth type (open, shared, etc) in the firmware. The name
+ * CMD_802_11_AUTHENTICATE is misleading: this firmware command doesn't
+ * send an authentication frame at all, it just
+ * stores the auth_type.
+ */
+static int lbs_set_authtype(struct lbs_private *priv,
+ struct cfg80211_connect_params *sme)
+{
+ struct cmd_ds_802_11_authenticate cmd;
+ int ret;
+
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", sme->auth_type);
+
+ /*
+ * cmd 11 00
+ * size 19 00
+ * sequence xx xx
+ * result 00 00
+ * BSS id 00 13 19 80 da 30
+ * auth type 00
+ * reserved 00 00 00 00 00 00 00 00 00 00
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ if (sme->bssid)
+ memcpy(cmd.bssid, sme->bssid, ETH_ALEN);
+ /* convert auth_type */
+ ret = lbs_auth_to_authtype(sme->auth_type);
+ if (ret < 0)
+ goto done;
+
+ cmd.authtype = ret;
+ ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd);
+
+ done:
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+/*
+ * Create association request
+ */
+#define LBS_ASSOC_MAX_CMD_SIZE \
+ (sizeof(struct cmd_ds_802_11_associate) \
+ - 512 /* cmd_ds_802_11_associate.iebuf */ \
+ + LBS_MAX_SSID_TLV_SIZE \
+ + LBS_MAX_CHANNEL_TLV_SIZE \
+ + LBS_MAX_CF_PARAM_TLV_SIZE \
+ + LBS_MAX_AUTH_TYPE_TLV_SIZE \
+ + LBS_MAX_WPA_TLV_SIZE)
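In words: start from the full fixed command, take out the 512-byte iebuf placeholder, and add back the worst-case sizes of the individual TLVs that actually get packed into that buffer below.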
+
+static int lbs_associate(struct lbs_private *priv,
+ struct cfg80211_bss *bss,
+ struct cfg80211_connect_params *sme)
+{
+ struct cmd_ds_802_11_associate_response *resp;
+ struct cmd_ds_802_11_associate *cmd = kzalloc(LBS_ASSOC_MAX_CMD_SIZE,
+ GFP_KERNEL);
+ const u8 *ssid_eid;
+ size_t len, resp_ie_len;
+ int status;
+ int ret;
+ u8 *pos = &(cmd->iebuf[0]);
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * cmd 50 00
+ * length 34 00
+ * sequence xx xx
+ * result 00 00
+ * BSS id 00 13 19 80 da 30
+ * capabilities 11 00
+ * listen interval 0a 00
+ * beacon interval 00 00
+ * DTIM period 00
+ * TLVs xx (up to 512 bytes)
+ */
+ cmd->hdr.command = cpu_to_le16(CMD_802_11_ASSOCIATE);
+
+ /* Fill in static fields */
+ memcpy(cmd->bssid, bss->bssid, ETH_ALEN);
+ cmd->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
+ cmd->capability = cpu_to_le16(bss->capability);
+
+ /* add SSID TLV */
+ ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssid_eid)
+ pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]);
+ else
+ lbs_deb_assoc("no SSID\n");
+
+ /* add DS param TLV */
+ if (bss->channel)
+ pos += lbs_add_channel_tlv(pos, bss->channel->hw_value);
+ else
+ lbs_deb_assoc("no channel\n");
+
+ /* add (empty) CF param TLV */
+ pos += lbs_add_cf_param_tlv(pos);
+
+ /* add rates TLV */
+ pos += lbs_add_common_rates_tlv(pos, bss);
+
+ /* add auth type TLV */
+ if (priv->fwrelease >= 0x09000000)
+ pos += lbs_add_auth_type_tlv(pos, sme->auth_type);
+
+ /* add WPA/WPA2 TLV */
+ if (sme->ie && sme->ie_len)
+ pos += lbs_add_wpa_tlv(pos, sme->ie, sme->ie_len);
+
+ len = (sizeof(*cmd) - sizeof(cmd->iebuf)) +
+ (u16)(pos - (u8 *) &cmd->iebuf);
+ cmd->hdr.size = cpu_to_le16(len);
+
+ /* store for later use */
+ memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN);
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_ASSOCIATE, cmd);
+ if (ret)
+ goto done;
+
+
+ /* generate connect message to cfg80211 */
+
+ resp = (void *) cmd; /* recast for easier field access */
+ status = le16_to_cpu(resp->statuscode);
+
+	/* Convert status codes of old firmware */
+ if (priv->fwrelease < 0x09000000)
+ switch (status) {
+ case 0:
+ break;
+ case 1:
+ lbs_deb_assoc("invalid association parameters\n");
+ status = WLAN_STATUS_CAPS_UNSUPPORTED;
+ break;
+ case 2:
+ lbs_deb_assoc("timer expired while waiting for AP\n");
+ status = WLAN_STATUS_AUTH_TIMEOUT;
+ break;
+ case 3:
+ lbs_deb_assoc("association refused by AP\n");
+ status = WLAN_STATUS_ASSOC_DENIED_UNSPEC;
+ break;
+ case 4:
+ lbs_deb_assoc("authentication refused by AP\n");
+ status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
+ break;
+ default:
+ lbs_deb_assoc("association failure %d\n", status);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ }
+
+ lbs_deb_assoc("status %d, capability 0x%04x\n", status,
+ le16_to_cpu(resp->capability));
+
+ resp_ie_len = le16_to_cpu(resp->hdr.size)
+ - sizeof(resp->hdr)
+ - 6;
+ cfg80211_connect_result(priv->dev,
+ priv->assoc_bss,
+ sme->ie, sme->ie_len,
+ resp->iebuf, resp_ie_len,
+ status,
+ GFP_KERNEL);
+
+ if (status == 0) {
+ /* TODO: get rid of priv->connect_status */
+ priv->connect_status = LBS_CONNECTED;
+ netif_carrier_on(priv->dev);
+ if (!priv->tx_pending_len)
+ netif_tx_wake_all_queues(priv->dev);
+ }
+
+
+done:
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+
+static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ struct cfg80211_bss *bss = NULL;
+ int ret = 0;
+ u8 preamble = RADIO_PREAMBLE_SHORT;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ if (sme->bssid) {
+ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+ sme->ssid, sme->ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ } else {
+ /*
+ * Here we have an impedance mismatch. The firmware command
+ * CMD_802_11_ASSOCIATE always needs a BSSID, it cannot
+ * connect otherwise. However, for the connect-API of
+		 * cfg80211 the bssid is purely optional. We don't get one
+		 * unless the user specifies one on the "iw" command line.
+		 *
+		 * If we don't get one, we could initiate a scan and look
+		 * for the best matching cfg80211_bss entry.
+		 *
+		 * Or, better yet, net/wireless/sme.c gets rewritten into
+ * something more generally useful.
+ */
+ lbs_pr_err("TODO: no BSS specified\n");
+ ret = -ENOTSUPP;
+ goto done;
+ }
+
+
+ if (!bss) {
+ lbs_pr_err("assicate: bss %pM not in scan results\n",
+ sme->bssid);
+ ret = -ENOENT;
+ goto done;
+ }
+ lbs_deb_assoc("trying %pM", sme->bssid);
+ lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n",
+ sme->crypto.cipher_group,
+ sme->key_idx, sme->key_len);
+
+ /* As this is a new connection, clear locally stored WEP keys */
+ priv->wep_tx_key = 0;
+ memset(priv->wep_key, 0, sizeof(priv->wep_key));
+ memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
+
+ /* set/remove WEP keys */
+ switch (sme->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* Store provided WEP keys in priv-> */
+ priv->wep_tx_key = sme->key_idx;
+ priv->wep_key_len[sme->key_idx] = sme->key_len;
+ memcpy(priv->wep_key[sme->key_idx], sme->key, sme->key_len);
+ /* Set WEP keys and WEP mode */
+ lbs_set_wep_keys(priv);
+ priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE;
+ lbs_set_mac_control(priv);
+ /* No RSN mode for WEP */
+ lbs_enable_rsn(priv, 0);
+ break;
+ case 0: /* there's no WLAN_CIPHER_SUITE_NONE definition */
+ /*
+		 * If we have neither WEP, nor WPA, nor WPA2,
+ * we remove all keys like in the WPA/WPA2 setup,
+ * we just don't set RSN.
+ *
+		 * Therefore: fall through
+ */
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ /* Remove WEP keys and WEP mode */
+ lbs_remove_wep_keys(priv);
+ priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE;
+ lbs_set_mac_control(priv);
+
+ /* clear the WPA/WPA2 keys */
+ lbs_set_key_material(priv,
+ KEY_TYPE_ID_WEP, /* doesn't matter */
+ KEY_INFO_WPA_UNICAST,
+ NULL, 0);
+ lbs_set_key_material(priv,
+ KEY_TYPE_ID_WEP, /* doesn't matter */
+ KEY_INFO_WPA_MCAST,
+ NULL, 0);
+ /* RSN mode for WPA/WPA2 */
+ lbs_enable_rsn(priv, sme->crypto.cipher_group != 0);
+ break;
+ default:
+ lbs_pr_err("unsupported cipher group 0x%x\n",
+ sme->crypto.cipher_group);
+ ret = -ENOTSUPP;
+ goto done;
+ }
+
+ lbs_set_authtype(priv, sme);
+ lbs_set_radio(priv, preamble, 1);
+
+ /* Do the actual association */
+ lbs_associate(priv, bss, sme);
+
+ done:
+ if (bss)
+ cfg80211_put_bss(bss);
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ struct cmd_ds_802_11_deauthenticate cmd;
+
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "reason_code %d", reason_code);
+
+ /* store for lbs_cfg_ret_disconnect() */
+ priv->disassoc_reason = reason_code;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	/* Mildly ugly to use a locally stored copy of my own BSSID ... */
+ memcpy(cmd.macaddr, &priv->assoc_bss, ETH_ALEN);
+ cmd.reasoncode = cpu_to_le16(reason_code);
+
+ if (lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd))
+ return -EFAULT;
+
+ cfg80211_disconnected(priv->dev,
+ priv->disassoc_reason,
+ NULL, 0,
+ GFP_KERNEL);
+ priv->connect_status = LBS_DISCONNECTED;
+
+ return 0;
+}
+
+
+static int lbs_cfg_set_default_key(struct wiphy *wiphy,
+ struct net_device *netdev,
+ u8 key_index)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ if (key_index != priv->wep_tx_key) {
+ lbs_deb_assoc("set_default_key: to %d\n", key_index);
+ priv->wep_tx_key = key_index;
+ lbs_set_wep_keys(priv);
+ }
+
+ return 0;
+}
+
+
+static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
+ u8 idx, const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ u16 key_info;
+ u16 key_type;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ lbs_deb_assoc("add_key: cipher 0x%x, mac_addr %pM\n",
+ params->cipher, mac_addr);
+ lbs_deb_assoc("add_key: key index %d, key len %d\n",
+ idx, params->key_len);
+ if (params->key_len)
+ lbs_deb_hex(LBS_DEB_CFG80211, "KEY",
+ params->key, params->key_len);
+
+ lbs_deb_assoc("add_key: seq len %d\n", params->seq_len);
+ if (params->seq_len)
+ lbs_deb_hex(LBS_DEB_CFG80211, "SEQ",
+ params->seq, params->seq_len);
+
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* actually compare if something has changed ... */
+ if ((priv->wep_key_len[idx] != params->key_len) ||
+ memcmp(priv->wep_key[idx],
+ params->key, params->key_len) != 0) {
+ priv->wep_key_len[idx] = params->key_len;
+ memcpy(priv->wep_key[idx],
+ params->key, params->key_len);
+ lbs_set_wep_keys(priv);
+ }
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_info = KEY_INFO_WPA_ENABLED | ((idx == 0)
+ ? KEY_INFO_WPA_UNICAST
+ : KEY_INFO_WPA_MCAST);
+ key_type = (params->cipher == WLAN_CIPHER_SUITE_TKIP)
+ ? KEY_TYPE_ID_TKIP
+ : KEY_TYPE_ID_AES;
+ lbs_set_key_material(priv,
+ key_type,
+ key_info,
+ params->key, params->key_len);
+ break;
+ default:
+ lbs_pr_err("unhandled cipher 0x%x\n", params->cipher);
+ ret = -ENOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+
+static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
+ u8 key_index, const u8 *mac_addr)
+{
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n",
+ key_index, mac_addr);
+
+#ifdef TODO
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ /*
+	 * I think we can keep this a NO-OP, because:
+	 *
+	 * - we clear all keys whenever we do lbs_cfg_connect() anyway
+	 * - neither "iw" nor "wpa_supplicant" will call this during
+	 *   an ongoing connection
+	 * - TODO: but I have to check if this is still true when
+	 *   I set the AP to periodic re-keying
+	 * - we haven't kzalloc()ed anything when adding a key in
+	 *   lbs_cfg_connect() or lbs_cfg_add_key().
+	 *
+	 * This means lbs_cfg_del_key() only gets called at disconnect time,
+ * where we'd just waste time deleting a key that is not going
+ * to be used anyway.
+ */
+ if (key_index < 3 && priv->wep_key_len[key_index]) {
+ priv->wep_key_len[key_index] = 0;
+ lbs_set_wep_keys(priv);
+ }
+#endif
+
+ return 0;
+}
+
+
+
+/***************************************************************************
+ * Monitor mode
+ */
+
+/* like "struct cmd_ds_802_11_monitor_mode", but with cmd_header. Once we
+ * get rid of WEXT, this should go into host.h */
+struct cmd_monitor_mode {
+ struct cmd_header hdr;
+
+ __le16 action;
+ __le16 mode;
+} __attribute__ ((packed));
+
+static int lbs_enable_monitor_mode(struct lbs_private *priv, int mode)
+{
+ struct cmd_monitor_mode cmd;
+ int ret;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ /*
+ * cmd 98 00
+ * size 0c 00
+ * sequence xx xx
+ * result 00 00
+ * action 01 00 ACT_SET
+ * enable 01 00
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ cmd.action = cpu_to_le16(CMD_ACT_SET);
+ cmd.mode = cpu_to_le16(mode);
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_MONITOR_MODE, &cmd);
+
+ if (ret == 0)
+ priv->dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ else
+ priv->dev->type = ARPHRD_ETHER;
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+ return ret;
+}
+
+
+
+
+
+
+/***************************************************************************
+ * Get station
+ */
+
+/*
+ * Fills in the signal and noise levels; returns 0 on success, non-zero on error.
+ */
+
+/* like "struct cmd_ds_802_11_rssi", but with cmd_header. Once we get rid
+ * of WEXT, this should go into host.h */
+struct cmd_rssi {
+ struct cmd_header hdr;
+
+ __le16 n_or_snr;
+ __le16 nf;
+ __le16 avg_snr;
+ __le16 avg_nf;
+} __attribute__ ((packed));
+
+static int lbs_get_signal(struct lbs_private *priv, s8 *signal, s8 *noise)
+{
+ struct cmd_rssi cmd;
+ int ret;
+
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ cmd.n_or_snr = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
+ ret = lbs_cmd_with_response(priv, CMD_802_11_RSSI, &cmd);
+
+ if (ret == 0) {
+ *signal = CAL_RSSI(le16_to_cpu(cmd.n_or_snr),
+ le16_to_cpu(cmd.nf));
+ *noise = CAL_NF(le16_to_cpu(cmd.nf));
+ }
+ return ret;
+}
+
+
+static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ s8 signal, noise;
+ int ret;
+ size_t i;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ sinfo->filled |= STATION_INFO_TX_BYTES |
+ STATION_INFO_TX_PACKETS |
+ STATION_INFO_RX_BYTES |
+ STATION_INFO_RX_PACKETS;
+ sinfo->tx_bytes = priv->dev->stats.tx_bytes;
+ sinfo->tx_packets = priv->dev->stats.tx_packets;
+ sinfo->rx_bytes = priv->dev->stats.rx_bytes;
+ sinfo->rx_packets = priv->dev->stats.rx_packets;
+
+ /* Get current RSSI */
+ ret = lbs_get_signal(priv, &signal, &noise);
+ if (ret == 0) {
+ sinfo->signal = signal;
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ }
+
+ /* Convert priv->cur_rate from hw_value to NL80211 value */
+ for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
+ if (priv->cur_rate == lbs_rates[i].hw_value) {
+ sinfo->txrate.legacy = lbs_rates[i].bitrate;
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+
+
+/***************************************************************************
+ * "Site survey", here just current channel and noise level
+ */
+
+static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
+ int idx, struct survey_info *survey)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ s8 signal, noise;
+ int ret;
+
+ if (idx != 0)
+		return -ENOENT;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ survey->channel = ieee80211_get_channel(wiphy,
+ ieee80211_channel_to_frequency(priv->channel));
+
+ ret = lbs_get_signal(priv, &signal, &noise);
+ if (ret == 0) {
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = noise;
+ }
+
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+
+
+/***************************************************************************
+ * Change interface
+ */
+
+static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ ret = lbs_enable_monitor_mode(priv, 1);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
+ ret = lbs_enable_monitor_mode(priv, 0);
+ if (!ret)
+ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 1);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
+ ret = lbs_enable_monitor_mode(priv, 0);
+ if (!ret)
+ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 2);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ }
+
+ if (!ret)
+ priv->wdev->iftype = type;
+
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+
+/***************************************************************************
+ * IBSS (Ad-Hoc)
+ */
+
+/* The firmware needs the following bits masked out of the beacon-derived
+ * capability field when associating/joining to a BSS:
+ * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
+ */
+#define CAPINFO_MASK (~(0xda00))
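Sanity check of the constant: bit 9 = 0x0200, bit 11 = 0x0800, bit 12 = 0x1000, bit 14 = 0x4000 and bit 15 = 0x8000 add up to 0xda00, hence the ~(0xda00) mask.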
+
+
+static void lbs_join_post(struct lbs_private *priv,
+ struct cfg80211_ibss_params *params,
+ u8 *bssid, u16 capability)
+{
+ u8 fake_ie[2 + IEEE80211_MAX_SSID_LEN + /* ssid */
+ 2 + 4 + /* basic rates */
+ 2 + 1 + /* DS parameter */
+ 2 + 2 + /* atim */
+ 2 + 8]; /* extended rates */
+ u8 *fake = fake_ie;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ /*
+ * For cfg80211_inform_bss, we'll need a fake IE, as we can't get
+ * the real IE from the firmware. So we fabricate a fake IE based on
+ * what the firmware actually sends (sniffed with wireshark).
+ */
+ /* Fake SSID IE */
+ *fake++ = WLAN_EID_SSID;
+ *fake++ = params->ssid_len;
+ memcpy(fake, params->ssid, params->ssid_len);
+ fake += params->ssid_len;
+ /* Fake supported basic rates IE */
+ *fake++ = WLAN_EID_SUPP_RATES;
+ *fake++ = 4;
+ *fake++ = 0x82;
+ *fake++ = 0x84;
+ *fake++ = 0x8b;
+ *fake++ = 0x96;
+ /* Fake DS channel IE */
+ *fake++ = WLAN_EID_DS_PARAMS;
+ *fake++ = 1;
+ *fake++ = params->channel->hw_value;
+ /* Fake IBSS params IE */
+ *fake++ = WLAN_EID_IBSS_PARAMS;
+ *fake++ = 2;
+ *fake++ = 0; /* ATIM=0 */
+ *fake++ = 0;
+	/* Fake extended rates IE, TODO: don't add this for 802.11b-only networks,
+ * but I don't know how this could be checked */
+ *fake++ = WLAN_EID_EXT_SUPP_RATES;
+ *fake++ = 8;
+ *fake++ = 0x0c;
+ *fake++ = 0x12;
+ *fake++ = 0x18;
+ *fake++ = 0x24;
+ *fake++ = 0x30;
+ *fake++ = 0x48;
+ *fake++ = 0x60;
+ *fake++ = 0x6c;
+ lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
+
+ cfg80211_inform_bss(priv->wdev->wiphy,
+ params->channel,
+ bssid,
+ 0,
+ capability,
+ params->beacon_interval,
+ fake_ie, fake - fake_ie,
+ 0, GFP_KERNEL);
+
+ memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
+ priv->wdev->ssid_len = params->ssid_len;
+
+ cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
+
+ /* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
+ priv->connect_status = LBS_CONNECTED;
+ netif_carrier_on(priv->dev);
+ if (!priv->tx_pending_len)
+ netif_wake_queue(priv->dev);
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+}
+
+static int lbs_ibss_join_existing(struct lbs_private *priv,
+ struct cfg80211_ibss_params *params,
+ struct cfg80211_bss *bss)
+{
+ const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
+ struct cmd_ds_802_11_ad_hoc_join cmd;
+ u8 preamble = RADIO_PREAMBLE_SHORT;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ /* TODO: set preamble based on scan result */
+ ret = lbs_set_radio(priv, preamble, 1);
+ if (ret)
+ goto out;
+
+ /*
+ * Example CMD_802_11_AD_HOC_JOIN command:
+ *
+ * command 2c 00 CMD_802_11_AD_HOC_JOIN
+ * size 65 00
+ * sequence xx xx
+ * result 00 00
+ * bssid 02 27 27 97 2f 96
+ * ssid 49 42 53 53 00 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * type 02 CMD_BSS_TYPE_IBSS
+ * beacon period 64 00
+ * dtim period 00
+ * timestamp 00 00 00 00 00 00 00 00
+ * localtime 00 00 00 00 00 00 00 00
+ * IE DS 03
+ * IE DS len 01
+ * IE DS channel 01
+	 * reserved           00 00 00 00
+ * IE IBSS 06
+ * IE IBSS len 02
+ * IE IBSS atim 00 00
+ * reserved 00 00 00 00
+ * capability 02 00
+ * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c 00
+ * fail timeout ff 00
+ * probe delay 00 00
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+ memcpy(cmd.bss.bssid, bss->bssid, ETH_ALEN);
+ memcpy(cmd.bss.ssid, params->ssid, params->ssid_len);
+ cmd.bss.type = CMD_BSS_TYPE_IBSS;
+ cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval);
+ cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS;
+ cmd.bss.ds.header.len = 1;
+ cmd.bss.ds.channel = params->channel->hw_value;
+ cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS;
+ cmd.bss.ibss.header.len = 2;
+ cmd.bss.ibss.atimwindow = 0;
+ cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
+
+ /* set rates to the intersection of our rates and the rates in the
+ bss */
+ if (!rates_eid) {
+ lbs_add_rates(cmd.bss.rates);
+ } else {
+ int hw, i;
+ u8 rates_max = rates_eid[1];
+ u8 *rates = cmd.bss.rates;
+ for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
+ u8 hw_rate = lbs_rates[hw].bitrate / 5;
+ for (i = 0; i < rates_max; i++) {
+ if (hw_rate == (rates_eid[i+2] & 0x7f)) {
+ u8 rate = rates_eid[i+2];
+ if (rate == 0x02 || rate == 0x04 ||
+ rate == 0x0b || rate == 0x16)
+ rate |= 0x80;
+ *rates++ = rate;
+ }
+ }
+ }
+ }
+
+ /* Only v8 and below support setting this */
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) {
+ cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
+ cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
+ }
+ ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
+ if (ret)
+ goto out;
+
+ /*
+ * This is a sample response to CMD_802_11_AD_HOC_JOIN:
+ *
+ * response 2c 80
+ * size 09 00
+ * sequence xx xx
+ * result 00 00
+ * reserved 00
+ */
+ lbs_join_post(priv, params, bss->bssid, bss->capability);
+
+ out:
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+
+static int lbs_ibss_start_new(struct lbs_private *priv,
+ struct cfg80211_ibss_params *params)
+{
+ struct cmd_ds_802_11_ad_hoc_start cmd;
+ struct cmd_ds_802_11_ad_hoc_result *resp =
+ (struct cmd_ds_802_11_ad_hoc_result *) &cmd;
+ u8 preamble = RADIO_PREAMBLE_SHORT;
+ int ret = 0;
+ u16 capability;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ ret = lbs_set_radio(priv, preamble, 1);
+ if (ret)
+ goto out;
+
+ /*
+ * Example CMD_802_11_AD_HOC_START command:
+ *
+ * command 2b 00 CMD_802_11_AD_HOC_START
+ * size b1 00
+ * sequence xx xx
+ * result 00 00
+ * ssid 54 45 53 54 00 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00
+ * bss type 02
+ * beacon period 64 00
+ * dtim period 00
+ * IE IBSS 06
+ * IE IBSS len 02
+ * IE IBSS atim 00 00
+ * reserved 00 00 00 00
+ * IE DS 03
+ * IE DS len 01
+ * IE DS channel 01
+ * reserved 00 00 00 00
+ * probe delay 00 00
+ * capability 02 00
+	 * rates           82 84 8b 96 (basic rates which have bit 7 set)
+ * 0c 12 18 24 30 48 60 6c
+ * padding 100 bytes
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ memcpy(cmd.ssid, params->ssid, params->ssid_len);
+ cmd.bsstype = CMD_BSS_TYPE_IBSS;
+ cmd.beaconperiod = cpu_to_le16(params->beacon_interval);
+ cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS;
+ cmd.ibss.header.len = 2;
+ cmd.ibss.atimwindow = 0;
+ cmd.ds.header.id = WLAN_EID_DS_PARAMS;
+ cmd.ds.header.len = 1;
+ cmd.ds.channel = params->channel->hw_value;
+ /* Only v8 and below support setting probe delay */
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8)
+ cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
+ /* TODO: mix in WLAN_CAPABILITY_PRIVACY */
+ capability = WLAN_CAPABILITY_IBSS;
+ cmd.capability = cpu_to_le16(capability);
+ lbs_add_rates(cmd.rates);
+
+
+ ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
+ if (ret)
+ goto out;
+
+ /*
+	 * This is a sample response to CMD_802_11_AD_HOC_START:
+ *
+ * response 2b 80
+ * size 14 00
+ * sequence xx xx
+ * result 00 00
+ * reserved 00
+ * bssid 02 2b 7b 0f 86 0e
+ */
+ lbs_join_post(priv, params, resp->bssid, capability);
+
+ out:
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ int ret = 0;
+ struct cfg80211_bss *bss;
+ DECLARE_SSID_BUF(ssid_buf);
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ if (!params->channel) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ ret = lbs_set_channel(priv, params->channel->hw_value);
+ if (ret)
+ goto out;
+
+ /* Search if someone is beaconing. This assumes that the
+ * bss list is populated already */
+ bss = cfg80211_get_bss(wiphy, params->channel, params->bssid,
+ params->ssid, params->ssid_len,
+ WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+
+ if (bss) {
+ ret = lbs_ibss_join_existing(priv, params, bss);
+ cfg80211_put_bss(bss);
+ } else
+ ret = lbs_ibss_start_new(priv, params);
+
+
+ out:
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ struct cmd_ds_802_11_ad_hoc_stop cmd;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
+
+ /* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
+ lbs_mac_event_disconnected(priv);
+
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+
+
+
+/***************************************************************************
+ * Initialization
+ */
+
static struct cfg80211_ops lbs_cfg80211_ops = {
.set_channel = lbs_cfg_set_channel,
+ .scan = lbs_cfg_scan,
+ .connect = lbs_cfg_connect,
+ .disconnect = lbs_cfg_disconnect,
+ .add_key = lbs_cfg_add_key,
+ .del_key = lbs_cfg_del_key,
+ .set_default_key = lbs_cfg_set_default_key,
+ .get_station = lbs_cfg_get_station,
+ .dump_survey = lbs_get_survey,
+ .change_virtual_intf = lbs_change_intf,
+ .join_ibss = lbs_join_ibss,
+ .leave_ibss = lbs_leave_ibss,
};
@@ -142,6 +1984,36 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev)
}
+static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv)
+{
+ struct region_code_mapping {
+ const char *cn;
+ int code;
+ };
+
+ /* Section 5.17.2 */
+ static struct region_code_mapping regmap[] = {
+ {"US ", 0x10}, /* US FCC */
+ {"CA ", 0x20}, /* Canada */
+ {"EU ", 0x30}, /* ETSI */
+ {"ES ", 0x31}, /* Spain */
+ {"FR ", 0x32}, /* France */
+ {"JP ", 0x40}, /* Japan */
+ };
+ size_t i;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ for (i = 0; i < ARRAY_SIZE(regmap); i++)
+ if (regmap[i].code == priv->regioncode) {
+ regulatory_hint(priv->wdev->wiphy, regmap[i].cn);
+ break;
+ }
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+}
+
+
/*
 * This function gets called after lbs_setup_firmware() determined the
 * firmware capabilities. So we can set up the wiphy according to our
@@ -157,10 +2029,12 @@ int lbs_cfg_register(struct lbs_private *priv)
wdev->wiphy->max_scan_ssids = 1;
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- /* TODO: BIT(NL80211_IFTYPE_ADHOC); */
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wdev->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+ if (lbs_rtap_supported(priv))
+ wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
- /* TODO: honor priv->regioncode */
wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
/*
@@ -169,6 +2043,7 @@ int lbs_cfg_register(struct lbs_private *priv)
*/
wdev->wiphy->cipher_suites = cipher_suites;
wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+ wdev->wiphy->reg_notifier = lbs_reg_notifier;
ret = wiphy_register(wdev->wiphy);
if (ret < 0)
@@ -180,10 +2055,129 @@ int lbs_cfg_register(struct lbs_private *priv)
if (ret)
lbs_pr_err("cannot register network device\n");
+ INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
+
+ lbs_cfg_set_regulatory_hint(priv);
+
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
}
+/**
+ * @brief This function sets DOMAIN INFO to FW
+ * @param priv pointer to struct lbs_private
+ * @return 0; -1
+*/
+static int lbs_11d_set_domain_info(struct lbs_private *priv)
+{
+ int ret;
+
+ ret = lbs_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO,
+ CMD_ACT_SET,
+ CMD_OPTION_WAITFORRSP, 0, NULL);
+ if (ret)
+ lbs_deb_11d("fail to dnld domain info\n");
+
+ return ret;
+}
+
+static void lbs_send_domain_info_cmd_fw(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ u8 no_of_triplet = 0;
+ u8 no_of_parsed_chan = 0;
+ u8 first_channel = 0, next_chan = 0, max_pwr = 0;
+ u8 i, flag = 0;
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ struct lbs_802_11d_domain_reg *domain_info = &priv->domain_reg;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_CFG80211);
+
+ /* Set country code */
+ domain_info->country_code[0] = request->alpha2[0];
+ domain_info->country_code[1] = request->alpha2[1];
+ domain_info->country_code[2] = ' ';
+
+ for (band = 0; band < IEEE80211_NUM_BANDS ; band++) {
+
+ if (!wiphy->bands[band])
+ continue;
+
+ sband = wiphy->bands[band];
+
+ for (i = 0; i < sband->n_channels ; i++) {
+ ch = &sband->channels[i];
+ if (ch->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ if (!flag) {
+ flag = 1;
+ next_chan = first_channel = (u32) ch->hw_value;
+ max_pwr = ch->max_power;
+ no_of_parsed_chan = 1;
+ continue;
+ }
+
+ if (ch->hw_value == next_chan + 1 &&
+ ch->max_power == max_pwr) {
+ next_chan++;
+ no_of_parsed_chan++;
+ } else {
+ domain_info->triplet[no_of_triplet]
+ .chans.first_channel = first_channel;
+ domain_info->triplet[no_of_triplet]
+ .chans.num_channels = no_of_parsed_chan;
+ domain_info->triplet[no_of_triplet]
+ .chans.max_power = max_pwr;
+ no_of_triplet++;
+ flag = 0;
+ }
+ }
+ if (flag) {
+ domain_info->triplet[no_of_triplet]
+ .chans.first_channel = first_channel;
+ domain_info->triplet[no_of_triplet]
+ .chans.num_channels = no_of_parsed_chan;
+ domain_info->triplet[no_of_triplet]
+ .chans.max_power = max_pwr;
+ no_of_triplet++;
+ }
+ }
+
+ domain_info->no_triplet = no_of_triplet;
+
+ /* Set domain info */
+ ret = lbs_11d_set_domain_info(priv);
+ if (ret)
+ lbs_pr_err("11D: error setting domain info in FW\n");
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+}
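As an example of the grouping above: if channels 1-11 are all enabled with the same maximum power of 20 dBm, the loop collapses them into a single triplet {first_channel = 1, num_channels = 11, max_power = 20}; a gap in the channel numbering or a change in maximum power closes the current triplet.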
+
+int lbs_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain "
+ "callback for domain %c%c\n", request->alpha2[0],
+ request->alpha2[1]);
+
+ lbs_send_domain_info_cmd_fw(wiphy, request);
+
+ lbs_deb_leave(LBS_DEB_CFG80211);
+
+ return 0;
+}
+
+void lbs_scan_deinit(struct lbs_private *priv)
+{
+ lbs_deb_enter(LBS_DEB_CFG80211);
+ cancel_delayed_work_sync(&priv->scan_work);
+}
+
void lbs_cfg_free(struct lbs_private *priv)
{
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index e09a193..756fb98 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -1,16 +1,27 @@
#ifndef __LBS_CFG80211_H__
#define __LBS_CFG80211_H__
-#include "dev.h"
+struct device;
+struct lbs_private;
+struct regulatory_request;
+struct wiphy;
struct wireless_dev *lbs_cfg_alloc(struct device *dev);
int lbs_cfg_register(struct lbs_private *priv);
void lbs_cfg_free(struct lbs_private *priv);
-int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
- u8 ssid_len);
-int lbs_scan_networks(struct lbs_private *priv, int full_scan);
-void lbs_cfg_scan_worker(struct work_struct *work);
+int lbs_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request);
+/* All of those are TODOs: */
+#define lbs_cmd_802_11_rssi(priv, cmdptr) (0)
+#define lbs_ret_802_11_rssi(priv, resp) (0)
+#define lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action) (0)
+#define lbs_ret_802_11_bcn_ctrl(priv, resp) (0)
+
+void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
+
+void lbs_scan_deinit(struct lbs_private *priv);
#endif
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 0fa6b0e..6c8a9d9 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -7,13 +7,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#include "host.h"
#include "decl.h"
-#include "defs.h"
-#include "dev.h"
-#include "assoc.h"
-#include "wext.h"
-#include "scan.h"
+#include "cfg.h"
#include "cmd.h"
@@ -177,11 +172,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
if (priv->mesh_dev)
memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
- if (lbs_set_regiontable(priv, priv->regioncode, 0)) {
- ret = -1;
- goto out;
- }
-
out:
lbs_deb_leave(LBS_DEB_CMD);
return ret;
@@ -909,6 +899,66 @@ void lbs_set_mac_control(struct lbs_private *priv)
}
/**
+ * @brief This function implements command CMD_802_11D_DOMAIN_INFO
+ * @param priv pointer to struct lbs_private
+ * @param cmd pointer to cmd buffer
+ * @param cmdno cmd ID
+ * @param cmdOption cmd action
+ * @return 0
+*/
+int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
+ struct cmd_ds_command *cmd,
+ u16 cmdoption)
+{
+ struct cmd_ds_802_11d_domain_info *pdomaininfo =
+ &cmd->params.domaininfo;
+ struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
+ u8 nr_triplet = priv->domain_reg.no_triplet;
+
+ lbs_deb_enter(LBS_DEB_11D);
+
+ lbs_deb_11d("nr_triplet=%x\n", nr_triplet);
+
+ pdomaininfo->action = cpu_to_le16(cmdoption);
+ if (cmdoption == CMD_ACT_GET) {
+ cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
+ sizeof(struct cmd_header));
+ lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
+ le16_to_cpu(cmd->size));
+ goto done;
+ }
+
+ domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
+ memcpy(domain->countrycode, priv->domain_reg.country_code,
+ sizeof(domain->countrycode));
+
+ domain->header.len = cpu_to_le16(nr_triplet
+ * sizeof(struct ieee80211_country_ie_triplet)
+ + sizeof(domain->countrycode));
+
+ if (nr_triplet) {
+ memcpy(domain->triplet, priv->domain_reg.triplet,
+ nr_triplet *
+ sizeof(struct ieee80211_country_ie_triplet));
+
+ cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
+ le16_to_cpu(domain->header.len) +
+ sizeof(struct mrvl_ie_header) +
+ sizeof(struct cmd_header));
+ } else {
+ cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
+ sizeof(struct cmd_header));
+ }
+
+ lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
+ le16_to_cpu(cmd->size));
+
+done:
+	lbs_deb_leave(LBS_DEB_11D);
+ return 0;
+}
+
+/**
* @brief This function prepare the command before send to firmware.
*
* @param priv A pointer to struct lbs_private structure
@@ -1006,6 +1056,11 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = 0;
goto done;
+ case CMD_802_11D_DOMAIN_INFO:
+ cmdptr->command = cpu_to_le16(cmd_no);
+ ret = lbs_cmd_802_11d_domain_info(priv, cmdptr, cmd_action);
+ break;
+
case CMD_802_11_TPC_CFG:
cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG);
cmdptr->size =
@@ -1325,6 +1380,15 @@ int lbs_execute_next_command(struct lbs_private *priv)
* check if in power save mode, if yes, put the device back
* to PS mode
*/
+#ifdef TODO
+ /*
+ * This was the old code for libertas+wext. Someone that
+ * understands this beast should re-code it in a sane way.
+ *
+ * I actually don't understand why this is related to WPA
+	 * and to connection status; shouldn't power management be
+	 * independent of such things?
+ */
if ((priv->psmode != LBS802_11POWERMODECAM) &&
(priv->psstate == PS_STATE_FULL_POWER) &&
((priv->connect_status == LBS_CONNECTED) ||
@@ -1346,6 +1410,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
lbs_ps_sleep(priv, 0);
}
}
+#endif
}
ret = 0;
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index d6c3063..a0d9482 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -5,18 +5,10 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
#include <asm/unaligned.h>
-#include <net/iw_handler.h>
+#include <net/cfg80211.h>
-#include "host.h"
-#include "decl.h"
-#include "cmd.h"
-#include "defs.h"
-#include "dev.h"
-#include "assoc.h"
-#include "wext.h"
+#include "cfg.h"
#include "cmd.h"
/**
@@ -39,7 +31,9 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
* It causes problem in the Supplicant
*/
msleep_interruptible(1000);
- lbs_send_disconnect_notification(priv);
+
+ if (priv->wdev->iftype == NL80211_IFTYPE_STATION)
+ lbs_send_disconnect_notification(priv);
/* report disconnect to upper layer */
netif_stop_queue(priv->dev);
@@ -50,23 +44,8 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
priv->currenttxskb = NULL;
priv->tx_pending_len = 0;
- /* reset SNR/NF/RSSI values */
- memset(priv->SNR, 0x00, sizeof(priv->SNR));
- memset(priv->NF, 0x00, sizeof(priv->NF));
- memset(priv->RSSI, 0x00, sizeof(priv->RSSI));
- memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
- memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
- priv->nextSNRNF = 0;
- priv->numSNRNF = 0;
priv->connect_status = LBS_DISCONNECTED;
- /* Clear out associated SSID and BSSID since connection is
- * no longer valid.
- */
- memset(&priv->curbssparams.bssid, 0, ETH_ALEN);
- memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN);
- priv->curbssparams.ssid_len = 0;
-
if (priv->psstate != PS_STATE_FULL_POWER) {
/* make firmware to exit PS mode */
lbs_deb_cmd("disconnected, so exit PS mode\n");
@@ -118,6 +97,52 @@ static int lbs_ret_reg_access(struct lbs_private *priv,
return ret;
}
+/**
+ * @brief This function parses the country info from the AP and downloads it to the firmware
+ * @param priv pointer to struct lbs_private
+ * @param resp pointer to command response buffer
+ * @return 0; -1
+ */
+static int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
+{
+ struct cmd_ds_802_11d_domain_info *domaininfo =
+ &resp->params.domaininforesp;
+ struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
+ u16 action = le16_to_cpu(domaininfo->action);
+ s16 ret = 0;
+ u8 nr_triplet = 0;
+
+ lbs_deb_enter(LBS_DEB_11D);
+
+ lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp,
+ (int)le16_to_cpu(resp->size));
+
+ nr_triplet = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
+ sizeof(struct ieee80211_country_ie_triplet);
+
+ lbs_deb_11d("domain info resp: nr_triplet %d\n", nr_triplet);
+
+ if (nr_triplet > MRVDRV_MAX_TRIPLET_802_11D) {
+ lbs_deb_11d("invalid number of triplets returned!!\n");
+ return -1;
+ }
+
+ switch (action) {
+ case CMD_ACT_SET: /*Proc set action */
+ break;
+
+ case CMD_ACT_GET:
+ break;
+ default:
+ lbs_deb_11d("invalid action:%d\n", domaininfo->action);
+ ret = -1;
+ break;
+ }
+
+ lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
+ return ret;
+}
+
static inline int handle_cmd_response(struct lbs_private *priv,
struct cmd_header *cmd_response)
{
@@ -151,6 +176,10 @@ static inline int handle_cmd_response(struct lbs_private *priv,
ret = lbs_ret_802_11_rssi(priv, resp);
break;
+ case CMD_RET(CMD_802_11D_DOMAIN_INFO):
+ ret = lbs_ret_802_11d_domain_info(resp);
+ break;
+
case CMD_RET(CMD_802_11_TPC_CFG):
spin_lock_irqsave(&priv->driver_lock, flags);
memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg,
@@ -262,7 +291,7 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
* ad-hoc mode. It takes place in
* lbs_execute_next_command().
*/
- if (priv->mode == IW_MODE_ADHOC &&
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR &&
action == CMD_SUBCMD_ENTER_PS)
priv->psmode = LBS802_11POWERMODECAM;
} else if (action == CMD_SUBCMD_ENTER_PS) {
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index de2caac..1736746 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -1,18 +1,13 @@
-#include <linux/module.h>
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
-#include <net/iw_handler.h>
-#include <net/lib80211.h>
-#include "dev.h"
#include "decl.h"
-#include "host.h"
-#include "debugfs.h"
#include "cmd.h"
+#include "debugfs.h"
static struct dentry *lbs_dir;
static char *szStates[] = {
@@ -60,51 +55,6 @@ static ssize_t lbs_dev_info(struct file *file, char __user *userbuf,
return res;
}
-
-static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct lbs_private *priv = file->private_data;
- size_t pos = 0;
- int numscansdone = 0, res;
- unsigned long addr = get_zeroed_page(GFP_KERNEL);
- char *buf = (char *)addr;
- DECLARE_SSID_BUF(ssid);
- struct bss_descriptor * iter_bss;
- if (!buf)
- return -ENOMEM;
-
- pos += snprintf(buf+pos, len-pos,
- "# | ch | rssi | bssid | cap | Qual | SSID\n");
-
- mutex_lock(&priv->lock);
- list_for_each_entry (iter_bss, &priv->network_list, list) {
- u16 ibss = (iter_bss->capability & WLAN_CAPABILITY_IBSS);
- u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY);
- u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT);
-
- pos += snprintf(buf+pos, len-pos, "%02u| %03d | %04d | %pM |",
- numscansdone, iter_bss->channel, iter_bss->rssi,
- iter_bss->bssid);
- pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability);
- pos += snprintf(buf+pos, len-pos, "%c%c%c |",
- ibss ? 'A' : 'I', privacy ? 'P' : ' ',
- spectrum_mgmt ? 'S' : ' ');
- pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi));
- pos += snprintf(buf+pos, len-pos, " %s\n",
- print_ssid(ssid, iter_bss->ssid,
- iter_bss->ssid_len));
-
- numscansdone++;
- }
- mutex_unlock(&priv->lock);
-
- res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
-
- free_page(addr);
- return res;
-}
-
static ssize_t lbs_sleepparams_write(struct file *file,
const char __user *user_buf, size_t count,
loff_t *ppos)
@@ -723,8 +673,6 @@ struct lbs_debugfs_files {
static const struct lbs_debugfs_files debugfs_files[] = {
{ "info", 0444, FOPS(lbs_dev_info, write_file_dummy), },
- { "getscantable", 0444, FOPS(lbs_getscantable,
- write_file_dummy), },
{ "sleepparams", 0644, FOPS(lbs_sleepparams_read,
lbs_sleepparams_write), },
};
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 61db8bc..ba5438a 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -1,3 +1,4 @@
+
/**
* This file contains declaration referring to
* functions defined in other source files
@@ -12,6 +13,7 @@
struct lbs_private;
struct sk_buff;
struct net_device;
+struct cmd_ds_command;
/* ethtool.c */
@@ -34,6 +36,8 @@ int lbs_start_card(struct lbs_private *priv);
void lbs_stop_card(struct lbs_private *priv);
void lbs_host_to_card_done(struct lbs_private *priv);
+int lbs_rtap_supported(struct lbs_private *priv);
+
int lbs_set_mac_address(struct net_device *dev, void *addr);
void lbs_set_multicast_list(struct net_device *dev);
@@ -49,5 +53,9 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
u32 lbs_fw_index_to_data_rate(u8 index);
u8 lbs_data_rate_to_fw_index(u32 rate);
+int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
+ struct cmd_ds_command *cmd, u16 cmdoption);
+
+int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp);
#endif
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 71c5ad4..4536d9c 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -7,8 +7,8 @@
#define _LBS_DEV_H_
#include "mesh.h"
-#include "scan.h"
-#include "assoc.h"
+#include "defs.h"
+#include "host.h"
#include <linux/kfifo.h>
@@ -29,7 +29,6 @@ struct lbs_private {
/* Basic networking */
struct net_device *dev;
u32 connect_status;
- int infra_open;
struct work_struct mcast_work;
u32 nr_of_multicastmacaddr;
u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
@@ -37,6 +36,9 @@ struct lbs_private {
/* CFG80211 */
struct wireless_dev *wdev;
bool wiphy_registered;
+ struct cfg80211_scan_request *scan_req;
+ u8 assoc_bss[ETH_ALEN];
+ u8 disassoc_reason;
/* Mesh */
struct net_device *mesh_dev; /* Virtual device */
@@ -49,10 +51,6 @@ struct lbs_private {
u8 mesh_ssid_len;
#endif
- /* Monitor mode */
- struct net_device *rtap_net_dev;
- u32 monitormode;
-
/* Debugfs */
struct dentry *debugfs_dir;
struct dentry *debugfs_debug;
@@ -62,6 +60,9 @@ struct lbs_private {
struct dentry *regs_dir;
struct dentry *debugfs_regs_files[6];
+ /** 11D and domain regulatory data */
+ struct lbs_802_11d_domain_reg domain_reg;
+
/* Hardware debugging */
u32 mac_offset;
u32 bbp_offset;
@@ -133,14 +134,10 @@ struct lbs_private {
struct workqueue_struct *work_thread;
/** Encryption stuff */
- struct lbs_802_11_security secinfo;
- struct enc_key wpa_mcast_key;
- struct enc_key wpa_unicast_key;
- u8 wpa_ie[MAX_WPA_IE_LEN];
- u8 wpa_ie_len;
- u16 wep_tx_keyidx;
- struct enc_key wep_keys[4];
u8 authtype_auto;
+ u8 wep_tx_key;
+ u8 wep_key[4][WLAN_KEY_LEN_WEP104];
+ u8 wep_key_len[4];
/* Wake On LAN */
uint32_t wol_criteria;
@@ -161,6 +158,7 @@ struct lbs_private {
/* NIC/link operation characteristics */
u16 mac_control;
u8 radio_on;
+ u8 cur_rate;
u8 channel;
s16 txpower_cur;
s16 txpower_min;
@@ -169,42 +167,6 @@ struct lbs_private {
/** Scanning */
struct delayed_work scan_work;
int scan_channel;
- /* remember which channel was scanned last, != 0 if currently scanning */
- u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 scan_ssid_len;
-
- /* Associating */
- struct delayed_work assoc_work;
- struct current_bss_params curbssparams;
- u8 mode;
- struct list_head network_list;
- struct list_head network_free_list;
- struct bss_descriptor *networks;
- struct assoc_request * pending_assoc_req;
- struct assoc_request * in_progress_assoc_req;
- uint16_t enablehwauto;
-
- /* ADHOC */
- u16 beacon_period;
- u8 beacon_enable;
- u8 adhoccreate;
-
- /* WEXT */
- char name[DEV_NAME_LEN];
- u8 nodename[16];
- struct iw_statistics wstats;
- u8 cur_rate;
-#define MAX_REGION_CHANNEL_NUM 2
- struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
-
- /** Requested Signal Strength*/
- u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG];
- u16 NF[MAX_TYPE_B][MAX_TYPE_AVG];
- u8 RSSI[MAX_TYPE_B][MAX_TYPE_AVG];
- u8 rawSNR[DEFAULT_DATA_AVG_FACTOR];
- u8 rawNF[DEFAULT_DATA_AVG_FACTOR];
- u16 nextSNRNF;
- u16 numSNRNF;
};
extern struct cmd_confirm_sleep confirm_sleep;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 0cf31bb..50193aa 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -2,13 +2,8 @@
#include <linux/ethtool.h>
#include <linux/delay.h>
-#include "host.h"
#include "decl.h"
-#include "defs.h"
-#include "dev.h"
-#include "wext.h"
#include "cmd.h"
-#include "mesh.h"
static void lbs_ethtool_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 3bd5d3b..db8e209 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -389,6 +389,30 @@ struct lbs_offset_value {
u32 value;
} __packed;
+#define MRVDRV_MAX_TRIPLET_802_11D 83
+
+#define COUNTRY_CODE_LEN 3
+
+struct mrvl_ie_domain_param_set {
+ struct mrvl_ie_header header;
+
+ u8 countrycode[COUNTRY_CODE_LEN];
+ struct ieee80211_country_ie_triplet triplet[1];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11d_domain_info {
+ __le16 action;
+ struct mrvl_ie_domain_param_set domain;
+} __attribute__ ((packed));
+
+struct lbs_802_11d_domain_reg {
+ /** Country code*/
+ u8 country_code[COUNTRY_CODE_LEN];
+ /** No. of triplet*/
+ u8 no_triplet;
+ struct ieee80211_country_ie_triplet triplet[MRVDRV_MAX_TRIPLET_802_11D];
+} __attribute__ ((packed));
+
/*
* Define data structure for CMD_GET_HW_SPEC
* This structure defines the response for the GET_HW_SPEC command
@@ -949,6 +973,9 @@ struct cmd_ds_command {
struct cmd_ds_bbp_reg_access bbpreg;
struct cmd_ds_rf_reg_access rfreg;
+ struct cmd_ds_802_11d_domain_info domaininfo;
+ struct cmd_ds_802_11d_domain_info domaininforesp;
+
struct cmd_ds_802_11_tpc_cfg tpccfg;
struct cmd_ds_802_11_afc afc;
struct cmd_ds_802_11_led_ctrl ledgpio;
@@ -958,5 +985,4 @@ struct cmd_ds_command {
struct cmd_ds_802_11_beacon_control bcn_ctrl;
} params;
} __packed;
-
#endif
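The host.h hunk above adds the 802.11d structures that the new command-response handler in cmdresp.c walks: the domain parameter set carries a three-byte country code followed by a variable number of ieee80211_country_ie_triplet entries, and the TLV header's len field covers both. The sketch below shows how the regulatory data held in struct lbs_802_11d_domain_reg could be packed into that TLV; it is illustrative only and assumes that mrvl_ie_header holds little-endian type/len fields and that TLV_TYPE_DOMAIN is the matching TLV id (neither is shown in this patch).

    /*
     * Illustrative sketch, not part of the patch: pack the stored
     * regulatory data into the domain TLV.  The len field covers the
     * country code plus the triplets, which is exactly what
     * lbs_ret_802_11d_domain_info() undoes when it derives nr_triplet.
     */
    static size_t lbs_fill_domain_tlv(struct mrvl_ie_domain_param_set *domain,
                                      const struct lbs_802_11d_domain_reg *reg)
    {
            size_t triplet_len = reg->no_triplet *
                                 sizeof(struct ieee80211_country_ie_triplet);

            domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN); /* assumed id */
            domain->header.len  = cpu_to_le16(COUNTRY_CODE_LEN + triplet_len);
            memcpy(domain->countrycode, reg->country_code, COUNTRY_CODE_LEN);
            memcpy(domain->triplet, reg->triplet, triplet_len);

            return sizeof(domain->header) + COUNTRY_CODE_LEN + triplet_len;
    }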
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index abfecc4..b519fc7 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -11,20 +11,14 @@
#include <linux/if_arp.h>
#include <linux/kthread.h>
#include <linux/kfifo.h>
-#include <linux/stddef.h>
-#include <linux/ieee80211.h>
#include <linux/slab.h>
-#include <net/iw_handler.h>
#include <net/cfg80211.h>
#include "host.h"
#include "decl.h"
#include "dev.h"
-#include "wext.h"
#include "cfg.h"
#include "debugfs.h"
-#include "scan.h"
-#include "assoc.h"
#include "cmd.h"
#define DRIVER_RELEASE_VERSION "323.p0"
@@ -96,72 +90,6 @@ u8 lbs_data_rate_to_fw_index(u32 rate)
}
-static int lbs_add_rtap(struct lbs_private *priv);
-static void lbs_remove_rtap(struct lbs_private *priv);
-
-
-/**
- * Get function for sysfs attribute rtap
- */
-static ssize_t lbs_rtap_get(struct device *dev,
- struct device_attribute *attr, char * buf)
-{
- struct lbs_private *priv = to_net_dev(dev)->ml_priv;
- return snprintf(buf, 5, "0x%X\n", priv->monitormode);
-}
-
-/**
- * Set function for sysfs attribute rtap
- */
-static ssize_t lbs_rtap_set(struct device *dev,
- struct device_attribute *attr, const char * buf, size_t count)
-{
- int monitor_mode;
- struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-
- sscanf(buf, "%x", &monitor_mode);
- if (monitor_mode) {
- if (priv->monitormode == monitor_mode)
- return strlen(buf);
- if (!priv->monitormode) {
- if (priv->infra_open || lbs_mesh_open(priv))
- return -EBUSY;
- if (priv->mode == IW_MODE_INFRA)
- lbs_cmd_80211_deauthenticate(priv,
- priv->curbssparams.bssid,
- WLAN_REASON_DEAUTH_LEAVING);
- else if (priv->mode == IW_MODE_ADHOC)
- lbs_adhoc_stop(priv);
- lbs_add_rtap(priv);
- }
- priv->monitormode = monitor_mode;
- } else {
- if (!priv->monitormode)
- return strlen(buf);
- priv->monitormode = 0;
- lbs_remove_rtap(priv);
-
- if (priv->currenttxskb) {
- dev_kfree_skb_any(priv->currenttxskb);
- priv->currenttxskb = NULL;
- }
-
- /* Wake queues, command thread, etc. */
- lbs_host_to_card_done(priv);
- }
-
- lbs_prepare_and_send_command(priv,
- CMD_802_11_MONITOR_MODE, CMD_ACT_SET,
- CMD_OPTION_WAITFORRSP, 0, &priv->monitormode);
- return strlen(buf);
-}
-
-/**
- * lbs_rtap attribute to be exported per ethX interface
- * through sysfs (/sys/class/net/ethX/lbs_rtap)
- */
-static DEVICE_ATTR(lbs_rtap, 0644, lbs_rtap_get, lbs_rtap_set );
-
/**
* @brief This function opens the ethX interface
*
@@ -177,13 +105,6 @@ static int lbs_dev_open(struct net_device *dev)
spin_lock_irq(&priv->driver_lock);
- if (priv->monitormode) {
- ret = -EBUSY;
- goto out;
- }
-
- priv->infra_open = 1;
-
if (priv->connect_status == LBS_CONNECTED)
netif_carrier_on(dev);
else
@@ -191,7 +112,6 @@ static int lbs_dev_open(struct net_device *dev)
if (!priv->tx_pending_len)
netif_wake_queue(dev);
- out:
spin_unlock_irq(&priv->driver_lock);
lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
@@ -211,7 +131,6 @@ static int lbs_eth_stop(struct net_device *dev)
lbs_deb_enter(LBS_DEB_NET);
spin_lock_irq(&priv->driver_lock);
- priv->infra_open = 0;
netif_stop_queue(dev);
spin_unlock_irq(&priv->driver_lock);
@@ -733,6 +652,9 @@ static int lbs_setup_firmware(struct lbs_private *priv)
priv->txpower_max = maxlevel;
}
+ /* Send cmd to FW to enable 11D function */
+ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+
lbs_set_mac_control(priv);
done:
lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
@@ -822,37 +744,16 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
static int lbs_init_adapter(struct lbs_private *priv)
{
- size_t bufsize;
- int i, ret = 0;
+ int ret;
lbs_deb_enter(LBS_DEB_MAIN);
- /* Allocate buffer to store the BSSID list */
- bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor);
- priv->networks = kzalloc(bufsize, GFP_KERNEL);
- if (!priv->networks) {
- lbs_pr_err("Out of memory allocating beacons\n");
- ret = -1;
- goto out;
- }
-
- /* Initialize scan result lists */
- INIT_LIST_HEAD(&priv->network_free_list);
- INIT_LIST_HEAD(&priv->network_list);
- for (i = 0; i < MAX_NETWORK_COUNT; i++) {
- list_add_tail(&priv->networks[i].list,
- &priv->network_free_list);
- }
-
memset(priv->current_addr, 0xff, ETH_ALEN);
priv->connect_status = LBS_DISCONNECTED;
- priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
- priv->mode = IW_MODE_INFRA;
priv->channel = DEFAULT_AD_HOC_CHANNEL;
priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
priv->radio_on = 1;
- priv->enablehwauto = 1;
priv->psmode = LBS802_11POWERMODECAM;
priv->psstate = PS_STATE_FULL_POWER;
priv->is_deep_sleep = 0;
@@ -907,8 +808,6 @@ static void lbs_free_adapter(struct lbs_private *priv)
kfifo_free(&priv->event_fifo);
del_timer(&priv->command_timer);
del_timer(&priv->auto_deepsleep_timer);
- kfree(priv->networks);
- priv->networks = NULL;
lbs_deb_leave(LBS_DEB_MAIN);
}
@@ -945,7 +844,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
lbs_pr_err("cfg80211 init failed\n");
goto done;
}
- /* TODO? */
+
wdev->iftype = NL80211_IFTYPE_STATION;
priv = wdev_priv(wdev);
priv->wdev = wdev;
@@ -955,7 +854,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
goto err_wdev;
}
- //TODO? dev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
dev = alloc_netdev(0, "wlan%d", ether_setup);
if (!dev) {
dev_err(dmdev, "no memory for network device instance\n");
@@ -971,20 +869,10 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
dev->netdev_ops = &lbs_netdev_ops;
dev->watchdog_timeo = 5 * HZ;
dev->ethtool_ops = &lbs_ethtool_ops;
-#ifdef WIRELESS_EXT
- dev->wireless_handlers = &lbs_handler_def;
-#endif
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
-
- // TODO: kzalloc + iwm_init_default_profile(iwm, iwm->umac_profile); ??
-
-
priv->card = card;
- priv->infra_open = 0;
-
- priv->rtap_net_dev = NULL;
strcpy(dev->name, "wlan%d");
lbs_deb_thread("Starting main thread...\n");
@@ -996,8 +884,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
}
priv->work_thread = create_singlethread_workqueue("lbs_worker");
- INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
- INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
priv->wol_criteria = 0xffffffff;
@@ -1031,12 +917,10 @@ void lbs_remove_card(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_MAIN);
lbs_remove_mesh(priv);
- lbs_remove_rtap(priv);
+ lbs_scan_deinit(priv);
dev = priv->dev;
- cancel_delayed_work_sync(&priv->scan_work);
- cancel_delayed_work_sync(&priv->assoc_work);
cancel_work_sync(&priv->mcast_work);
/* worker thread destruction blocks on the in-flight command which
@@ -1051,8 +935,6 @@ void lbs_remove_card(struct lbs_private *priv)
lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
}
- lbs_send_disconnect_notification(priv);
-
if (priv->is_deep_sleep) {
priv->is_deep_sleep = 0;
wake_up_interruptible(&priv->ds_awake_q);
@@ -1077,7 +959,7 @@ void lbs_remove_card(struct lbs_private *priv)
EXPORT_SYMBOL_GPL(lbs_remove_card);
-static int lbs_rtap_supported(struct lbs_private *priv)
+int lbs_rtap_supported(struct lbs_private *priv)
{
if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
return 1;
@@ -1109,16 +991,6 @@ int lbs_start_card(struct lbs_private *priv)
lbs_init_mesh(priv);
- /*
- * While rtap isn't related to mesh, only mesh-enabled
- * firmware implements the rtap functionality via
- * CMD_802_11_MONITOR_MODE.
- */
- if (lbs_rtap_supported(priv)) {
- if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
- lbs_pr_err("cannot register lbs_rtap attribute\n");
- }
-
lbs_debugfs_init_one(priv, dev);
lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name);
@@ -1150,9 +1022,6 @@ void lbs_stop_card(struct lbs_private *priv)
lbs_debugfs_remove_one(priv);
lbs_deinit_mesh(priv);
- if (lbs_rtap_supported(priv))
- device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
-
/* Delete the timeout of the currently processing command */
del_timer_sync(&priv->command_timer);
del_timer_sync(&priv->auto_deepsleep_timer);
@@ -1239,87 +1108,6 @@ static void __exit lbs_exit_module(void)
lbs_deb_leave(LBS_DEB_MAIN);
}
-/*
- * rtap interface support functions
- */
-
-static int lbs_rtap_open(struct net_device *dev)
-{
- /* Yes, _stop_ the queue. Because we don't support injection */
- lbs_deb_enter(LBS_DEB_MAIN);
- netif_carrier_off(dev);
- netif_stop_queue(dev);
- lbs_deb_leave(LBS_DEB_LEAVE);
- return 0;
-}
-
-static int lbs_rtap_stop(struct net_device *dev)
-{
- lbs_deb_enter(LBS_DEB_MAIN);
- lbs_deb_leave(LBS_DEB_MAIN);
- return 0;
-}
-
-static netdev_tx_t lbs_rtap_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
-}
-
-static void lbs_remove_rtap(struct lbs_private *priv)
-{
- lbs_deb_enter(LBS_DEB_MAIN);
- if (priv->rtap_net_dev == NULL)
- goto out;
- unregister_netdev(priv->rtap_net_dev);
- free_netdev(priv->rtap_net_dev);
- priv->rtap_net_dev = NULL;
-out:
- lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-static const struct net_device_ops rtap_netdev_ops = {
- .ndo_open = lbs_rtap_open,
- .ndo_stop = lbs_rtap_stop,
- .ndo_start_xmit = lbs_rtap_hard_start_xmit,
-};
-
-static int lbs_add_rtap(struct lbs_private *priv)
-{
- int ret = 0;
- struct net_device *rtap_dev;
-
- lbs_deb_enter(LBS_DEB_MAIN);
- if (priv->rtap_net_dev) {
- ret = -EPERM;
- goto out;
- }
-
- rtap_dev = alloc_netdev(0, "rtap%d", ether_setup);
- if (rtap_dev == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- memcpy(rtap_dev->dev_addr, priv->current_addr, ETH_ALEN);
- rtap_dev->type = ARPHRD_IEEE80211_RADIOTAP;
- rtap_dev->netdev_ops = &rtap_netdev_ops;
- rtap_dev->ml_priv = priv;
- SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
-
- ret = register_netdev(rtap_dev);
- if (ret) {
- free_netdev(rtap_dev);
- goto out;
- }
- priv->rtap_net_dev = rtap_dev;
-
-out:
- lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
- return ret;
-}
-
module_init(lbs_init_module);
module_exit(lbs_exit_module);
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index e385af1..bc5bc13 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -5,6 +5,7 @@
#include <linux/if_arp.h>
#include <linux/kthread.h>
#include <linux/kfifo.h>
+#include <net/cfg80211.h>
#include "mesh.h"
#include "decl.h"
@@ -314,7 +315,7 @@ static int lbs_mesh_dev_open(struct net_device *dev)
spin_lock_irq(&priv->driver_lock);
- if (priv->monitormode) {
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
ret = -EBUSY;
goto out;
}
@@ -369,9 +370,6 @@ int lbs_add_mesh(struct lbs_private *priv)
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
-#ifdef WIRELESS_EXT
- mesh_dev->wireless_handlers = &mesh_handler_def;
-#endif
mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
/* Register virtual mesh interface */
ret = register_netdev(mesh_dev);
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index e257330..84ea248 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -70,11 +70,6 @@ void lbs_persist_config_init(struct net_device *net);
void lbs_persist_config_remove(struct net_device *net);
-/* WEXT handler */
-
-extern struct iw_handler_def mesh_handler_def;
-
-
/* Ethtool statistics */
struct ethtool_stats;
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 1c63f8c..a4d0bca 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -4,12 +4,13 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <net/cfg80211.h>
+#include "defs.h"
#include "host.h"
#include "radiotap.h"
#include "decl.h"
#include "dev.h"
-#include "wext.h"
struct eth803hdr {
u8 dest_addr[6];
@@ -39,98 +40,6 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb);
/**
- * @brief This function computes the avgSNR .
- *
- * @param priv A pointer to struct lbs_private structure
- * @return avgSNR
- */
-static u8 lbs_getavgsnr(struct lbs_private *priv)
-{
- u8 i;
- u16 temp = 0;
- if (priv->numSNRNF == 0)
- return 0;
- for (i = 0; i < priv->numSNRNF; i++)
- temp += priv->rawSNR[i];
- return (u8) (temp / priv->numSNRNF);
-
-}
-
-/**
- * @brief This function computes the AvgNF
- *
- * @param priv A pointer to struct lbs_private structure
- * @return AvgNF
- */
-static u8 lbs_getavgnf(struct lbs_private *priv)
-{
- u8 i;
- u16 temp = 0;
- if (priv->numSNRNF == 0)
- return 0;
- for (i = 0; i < priv->numSNRNF; i++)
- temp += priv->rawNF[i];
- return (u8) (temp / priv->numSNRNF);
-
-}
-
-/**
- * @brief This function saves the raw SNR/NF to our internal buffer
- *
- * @param priv A pointer to struct lbs_private structure
- * @param prxpd A pointer to rxpd structure of received packet
- * @return n/a
- */
-static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
-{
- if (priv->numSNRNF < DEFAULT_DATA_AVG_FACTOR)
- priv->numSNRNF++;
- priv->rawSNR[priv->nextSNRNF] = p_rx_pd->snr;
- priv->rawNF[priv->nextSNRNF] = p_rx_pd->nf;
- priv->nextSNRNF++;
- if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR)
- priv->nextSNRNF = 0;
-}
-
-/**
- * @brief This function computes the RSSI in received packet.
- *
- * @param priv A pointer to struct lbs_private structure
- * @param prxpd A pointer to rxpd structure of received packet
- * @return n/a
- */
-static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
-{
-
- lbs_deb_enter(LBS_DEB_RX);
-
- lbs_deb_rx("rxpd: SNR %d, NF %d\n", p_rx_pd->snr, p_rx_pd->nf);
- lbs_deb_rx("before computing SNR: SNR-avg = %d, NF-avg = %d\n",
- priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
- priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
-
- priv->SNR[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->snr;
- priv->NF[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->nf;
- lbs_save_rawSNRNF(priv, p_rx_pd);
-
- priv->SNR[TYPE_RXPD][TYPE_AVG] = lbs_getavgsnr(priv) * AVG_SCALE;
- priv->NF[TYPE_RXPD][TYPE_AVG] = lbs_getavgnf(priv) * AVG_SCALE;
- lbs_deb_rx("after computing SNR: SNR-avg = %d, NF-avg = %d\n",
- priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
- priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
-
- priv->RSSI[TYPE_RXPD][TYPE_NOAVG] =
- CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_NOAVG],
- priv->NF[TYPE_RXPD][TYPE_NOAVG]);
-
- priv->RSSI[TYPE_RXPD][TYPE_AVG] =
- CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
- priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
-
- lbs_deb_leave(LBS_DEB_RX);
-}
-
-/**
* @brief This function processes received packet and forwards it
* to kernel/upper layer
*
@@ -154,7 +63,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
skb->ip_summed = CHECKSUM_NONE;
- if (priv->monitormode)
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
return process_rxed_802_11_packet(priv, skb);
p_rx_pd = (struct rxpd *) skb->data;
@@ -225,13 +134,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
*/
skb_pull(skb, hdrchop);
- /* Take the data rate from the rxpd structure
- * only if the rate is auto
- */
- if (priv->enablehwauto)
- priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
-
- lbs_compute_rssi(priv, p_rx_pd);
+ priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
dev->stats.rx_bytes += skb->len;
@@ -352,20 +255,18 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr));
memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr));
- /* Take the data rate from the rxpd structure
- * only if the rate is auto
- */
- if (priv->enablehwauto)
- priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
-
- lbs_compute_rssi(priv, prxpd);
+ priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
- skb->protocol = eth_type_trans(skb, priv->rtap_net_dev);
- netif_rx(skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
ret = 0;
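With the monitor-mode rework in rx.c above, radiotap-tagged frames are delivered through the normal wlan device instead of a separate rtap%d interface, and delivery now picks the right entry point for the calling context. A minimal sketch of that dispatch, assuming the skb is already fully set up (protocol, device, stats) as in the hunk above:

    /*
     * Sketch of the delivery choice used in the hunk above: netif_rx()
     * is only safe from interrupt context, so process context uses
     * netif_rx_ni(), which also runs any softirqs raised by the call.
     */
    static void lbs_deliver_rx_skb(struct sk_buff *skb)
    {
            if (in_interrupt())
                    netif_rx(skb);
            else
                    netif_rx_ni(skb);
    }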
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
deleted file mode 100644
index 7d82f13b..0000000
--- a/drivers/net/wireless/libertas/scan.c
+++ /dev/null
@@ -1,1354 +0,0 @@
-/**
- * Functions implementing wlan scan IOCTL and firmware command APIs
- *
- * IOCTL handlers as well as command preparation and response routines
- * for sending scan commands to the firmware.
- */
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <asm/unaligned.h>
-#include <net/lib80211.h>
-
-#include "host.h"
-#include "dev.h"
-#include "scan.h"
-#include "assoc.h"
-#include "wext.h"
-#include "cmd.h"
-
-//! Approximate amount of data needed to pass a scan result back to iwlist
-#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \
- + IEEE80211_MAX_SSID_LEN \
- + IW_EV_UINT_LEN \
- + IW_EV_FREQ_LEN \
- + IW_EV_QUAL_LEN \
- + IEEE80211_MAX_SSID_LEN \
- + IW_EV_PARAM_LEN \
- + 40) /* 40 for WPAIE */
-
-//! Memory needed to store a max sized channel List TLV for a firmware scan
-#define CHAN_TLV_MAX_SIZE (sizeof(struct mrvl_ie_header) \
- + (MRVDRV_MAX_CHANNELS_PER_SCAN \
- * sizeof(struct chanscanparamset)))
-
-//! Memory needed to store a max number/size SSID TLV for a firmware scan
-#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvl_ie_ssid_param_set))
-
-//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max
-#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan) \
- + CHAN_TLV_MAX_SIZE + SSID_TLV_MAX_SIZE)
-
-//! The maximum number of channels the firmware can scan per command
-#define MRVDRV_MAX_CHANNELS_PER_SCAN 14
-
-/**
- * @brief Number of channels to scan per firmware scan command issuance.
- *
- * Number restricted to prevent hitting the limit on the amount of scan data
- * returned in a single firmware scan command.
- */
-#define MRVDRV_CHANNELS_PER_SCAN_CMD 4
-
-//! Scan time specified in the channel TLV for each channel for passive scans
-#define MRVDRV_PASSIVE_SCAN_CHAN_TIME 100
-
-//! Scan time specified in the channel TLV for each channel for active scans
-#define MRVDRV_ACTIVE_SCAN_CHAN_TIME 100
-
-#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
-
-static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
- struct cmd_header *resp);
-
-/*********************************************************************/
-/* */
-/* Misc helper functions */
-/* */
-/*********************************************************************/
-
-/**
- * @brief Unsets the MSB on basic rates
- *
- * Scan through an array and unset the MSB for basic data rates.
- *
- * @param rates buffer of data rates
- * @param len size of buffer
- */
-static void lbs_unset_basic_rate_flags(u8 *rates, size_t len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- rates[i] &= 0x7f;
-}
-
-
-static inline void clear_bss_descriptor(struct bss_descriptor *bss)
-{
- /* Don't blow away ->list, just BSS data */
- memset(bss, 0, offsetof(struct bss_descriptor, list));
-}
-
-/**
- * @brief Compare two SSIDs
- *
- * @param ssid1 A pointer to ssid to compare
- * @param ssid2 A pointer to ssid to compare
- *
- * @return 0: ssid is same, otherwise is different
- */
-int lbs_ssid_cmp(uint8_t *ssid1, uint8_t ssid1_len, uint8_t *ssid2,
- uint8_t ssid2_len)
-{
- if (ssid1_len != ssid2_len)
- return -1;
-
- return memcmp(ssid1, ssid2, ssid1_len);
-}
-
-static inline int is_same_network(struct bss_descriptor *src,
- struct bss_descriptor *dst)
-{
- /* A network is only a duplicate if the channel, BSSID, and ESSID
- * all match. We treat all <hidden> with the same BSSID and channel
- * as one network */
- return ((src->ssid_len == dst->ssid_len) &&
- (src->channel == dst->channel) &&
- !compare_ether_addr(src->bssid, dst->bssid) &&
- !memcmp(src->ssid, dst->ssid, src->ssid_len));
-}
-
-
-
-/*********************************************************************/
-/* */
-/* Region channel support */
-/* */
-/*********************************************************************/
-
-#define LBS_TX_PWR_DEFAULT 20 /*100mW */
-#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */
-#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */
-#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */
-#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */
-
-/* Format { channel, frequency (MHz), maxtxpower } */
-/* band: 'B/G', region: USA FCC/Canada IC */
-static struct chan_freq_power channel_freq_power_US_BG[] = {
- {1, 2412, LBS_TX_PWR_US_DEFAULT},
- {2, 2417, LBS_TX_PWR_US_DEFAULT},
- {3, 2422, LBS_TX_PWR_US_DEFAULT},
- {4, 2427, LBS_TX_PWR_US_DEFAULT},
- {5, 2432, LBS_TX_PWR_US_DEFAULT},
- {6, 2437, LBS_TX_PWR_US_DEFAULT},
- {7, 2442, LBS_TX_PWR_US_DEFAULT},
- {8, 2447, LBS_TX_PWR_US_DEFAULT},
- {9, 2452, LBS_TX_PWR_US_DEFAULT},
- {10, 2457, LBS_TX_PWR_US_DEFAULT},
- {11, 2462, LBS_TX_PWR_US_DEFAULT}
-};
-
-/* band: 'B/G', region: Europe ETSI */
-static struct chan_freq_power channel_freq_power_EU_BG[] = {
- {1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
- {2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
- {3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
- {4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
- {5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
- {6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
- {7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
- {8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
- {9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
- {10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
- {11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
- {12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
- {13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
-};
-
-/* band: 'B/G', region: Spain */
-static struct chan_freq_power channel_freq_power_SPN_BG[] = {
- {10, 2457, LBS_TX_PWR_DEFAULT},
- {11, 2462, LBS_TX_PWR_DEFAULT}
-};
-
-/* band: 'B/G', region: France */
-static struct chan_freq_power channel_freq_power_FR_BG[] = {
- {10, 2457, LBS_TX_PWR_FR_DEFAULT},
- {11, 2462, LBS_TX_PWR_FR_DEFAULT},
- {12, 2467, LBS_TX_PWR_FR_DEFAULT},
- {13, 2472, LBS_TX_PWR_FR_DEFAULT}
-};
-
-/* band: 'B/G', region: Japan */
-static struct chan_freq_power channel_freq_power_JPN_BG[] = {
- {1, 2412, LBS_TX_PWR_JP_DEFAULT},
- {2, 2417, LBS_TX_PWR_JP_DEFAULT},
- {3, 2422, LBS_TX_PWR_JP_DEFAULT},
- {4, 2427, LBS_TX_PWR_JP_DEFAULT},
- {5, 2432, LBS_TX_PWR_JP_DEFAULT},
- {6, 2437, LBS_TX_PWR_JP_DEFAULT},
- {7, 2442, LBS_TX_PWR_JP_DEFAULT},
- {8, 2447, LBS_TX_PWR_JP_DEFAULT},
- {9, 2452, LBS_TX_PWR_JP_DEFAULT},
- {10, 2457, LBS_TX_PWR_JP_DEFAULT},
- {11, 2462, LBS_TX_PWR_JP_DEFAULT},
- {12, 2467, LBS_TX_PWR_JP_DEFAULT},
- {13, 2472, LBS_TX_PWR_JP_DEFAULT},
- {14, 2484, LBS_TX_PWR_JP_DEFAULT}
-};
-
-/**
- * the structure for channel, frequency and power
- */
-struct region_cfp_table {
- u8 region;
- struct chan_freq_power *cfp_BG;
- int cfp_no_BG;
-};
-
-/**
- * the structure for the mapping between region and CFP
- */
-static struct region_cfp_table region_cfp_table[] = {
- {0x10, /*US FCC */
- channel_freq_power_US_BG,
- ARRAY_SIZE(channel_freq_power_US_BG),
- }
- ,
- {0x20, /*CANADA IC */
- channel_freq_power_US_BG,
- ARRAY_SIZE(channel_freq_power_US_BG),
- }
- ,
- {0x30, /*EU*/ channel_freq_power_EU_BG,
- ARRAY_SIZE(channel_freq_power_EU_BG),
- }
- ,
- {0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
- ARRAY_SIZE(channel_freq_power_SPN_BG),
- }
- ,
- {0x32, /*FRANCE*/ channel_freq_power_FR_BG,
- ARRAY_SIZE(channel_freq_power_FR_BG),
- }
- ,
- {0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
- ARRAY_SIZE(channel_freq_power_JPN_BG),
- }
- ,
-/*Add new region here */
-};
-
-/**
- * @brief This function finds the CFP in
- * region_cfp_table based on region and band parameter.
- *
- * @param region The region code
- * @param band The band
- * @param cfp_no A pointer to CFP number
- * @return A pointer to CFP
- */
-static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
-{
- int i, end;
-
- lbs_deb_enter(LBS_DEB_MAIN);
-
- end = ARRAY_SIZE(region_cfp_table);
-
- for (i = 0; i < end ; i++) {
- lbs_deb_main("region_cfp_table[i].region=%d\n",
- region_cfp_table[i].region);
- if (region_cfp_table[i].region == region) {
- *cfp_no = region_cfp_table[i].cfp_no_BG;
- lbs_deb_leave(LBS_DEB_MAIN);
- return region_cfp_table[i].cfp_BG;
- }
- }
-
- lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
- return NULL;
-}
-
-int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
-{
- int ret = 0;
- int i = 0;
-
- struct chan_freq_power *cfp;
- int cfp_no;
-
- lbs_deb_enter(LBS_DEB_MAIN);
-
- memset(priv->region_channel, 0, sizeof(priv->region_channel));
-
- cfp = lbs_get_region_cfp_table(region, &cfp_no);
- if (cfp != NULL) {
- priv->region_channel[i].nrcfp = cfp_no;
- priv->region_channel[i].CFP = cfp;
- } else {
- lbs_deb_main("wrong region code %#x in band B/G\n",
- region);
- ret = -1;
- goto out;
- }
- priv->region_channel[i].valid = 1;
- priv->region_channel[i].region = region;
- priv->region_channel[i].band = band;
- i++;
-out:
- lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
- return ret;
-}
-
-
-
-
-/*********************************************************************/
-/* */
-/* Main scanning support */
-/* */
-/*********************************************************************/
-
-/**
- * @brief Create a channel list for the driver to scan based on region info
- *
- * Only used from lbs_scan_setup_scan_config()
- *
- * Use the driver region/band information to construct a comprehensive list
- * of channels to scan. This routine is used for any scan that is not
- * provided a specific channel list to scan.
- *
- * @param priv A pointer to struct lbs_private structure
- * @param scanchanlist Output parameter: resulting channel list to scan
- *
- * @return void
- */
-static int lbs_scan_create_channel_list(struct lbs_private *priv,
- struct chanscanparamset *scanchanlist)
-{
- struct region_channel *scanregion;
- struct chan_freq_power *cfp;
- int rgnidx;
- int chanidx;
- int nextchan;
- uint8_t scantype;
-
- chanidx = 0;
-
- /* Set the default scan type to the user specified type, will later
- * be changed to passive on a per channel basis if restricted by
- * regulatory requirements (11d or 11h)
- */
- scantype = CMD_SCAN_TYPE_ACTIVE;
-
- for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
- if (!priv->region_channel[rgnidx].valid)
- continue;
- scanregion = &priv->region_channel[rgnidx];
-
- for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
- struct chanscanparamset *chan = &scanchanlist[chanidx];
-
- cfp = scanregion->CFP + nextchan;
-
- if (scanregion->band == BAND_B || scanregion->band == BAND_G)
- chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
-
- if (scantype == CMD_SCAN_TYPE_PASSIVE) {
- chan->maxscantime = cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME);
- chan->chanscanmode.passivescan = 1;
- } else {
- chan->maxscantime = cpu_to_le16(MRVDRV_ACTIVE_SCAN_CHAN_TIME);
- chan->chanscanmode.passivescan = 0;
- }
-
- chan->channumber = cfp->channel;
- }
- }
- return chanidx;
-}
-
-/*
- * Add SSID TLV of the form:
- *
- * TLV-ID SSID 00 00
- * length 06 00
- * ssid 4d 4e 54 45 53 54
- */
-static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv)
-{
- struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv;
-
- ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
- ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len);
- memcpy(ssid_tlv->ssid, priv->scan_ssid, priv->scan_ssid_len);
- return sizeof(ssid_tlv->header) + priv->scan_ssid_len;
-}
-
-/*
- * Add CHANLIST TLV of the form
- *
- * TLV-ID CHANLIST 01 01
- * length 5b 00
- * channel 1 00 01 00 00 00 64 00
- * radio type 00
- * channel 01
- * scan type 00
- * min scan time 00 00
- * max scan time 64 00
- * channel 2 00 02 00 00 00 64 00
- * channel 3 00 03 00 00 00 64 00
- * channel 4 00 04 00 00 00 64 00
- * channel 5 00 05 00 00 00 64 00
- * channel 6 00 06 00 00 00 64 00
- * channel 7 00 07 00 00 00 64 00
- * channel 8 00 08 00 00 00 64 00
- * channel 9 00 09 00 00 00 64 00
- * channel 10 00 0a 00 00 00 64 00
- * channel 11 00 0b 00 00 00 64 00
- * channel 12 00 0c 00 00 00 64 00
- * channel 13 00 0d 00 00 00 64 00
- *
- */
-static int lbs_scan_add_chanlist_tlv(uint8_t *tlv,
- struct chanscanparamset *chan_list,
- int chan_count)
-{
- size_t size = sizeof(struct chanscanparamset) *chan_count;
- struct mrvl_ie_chanlist_param_set *chan_tlv = (void *)tlv;
-
- chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
- memcpy(chan_tlv->chanscanparam, chan_list, size);
- chan_tlv->header.len = cpu_to_le16(size);
- return sizeof(chan_tlv->header) + size;
-}
-
-/*
- * Add RATES TLV of the form
- *
- * TLV-ID RATES 01 00
- * length 0e 00
- * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c
- *
- * The rates are in lbs_bg_rates[], but for the 802.11b
- * rates the high bit isn't set.
- */
-static int lbs_scan_add_rates_tlv(uint8_t *tlv)
-{
- int i;
- struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv;
-
- rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
- tlv += sizeof(rate_tlv->header);
- for (i = 0; i < MAX_RATES; i++) {
- *tlv = lbs_bg_rates[i];
- if (*tlv == 0)
- break;
- /* This code makes sure that the 802.11b rates (1 MBit/s, 2
- MBit/s, 5.5 MBit/s and 11 MBit/s) get the high bit set.
- Note that the values are MBit/s * 2, to mark them as
- basic rates so that the firmware likes it better */
- if (*tlv == 0x02 || *tlv == 0x04 ||
- *tlv == 0x0b || *tlv == 0x16)
- *tlv |= 0x80;
- tlv++;
- }
- rate_tlv->header.len = cpu_to_le16(i);
- return sizeof(rate_tlv->header) + i;
-}
-
-/*
- * Generate the CMD_802_11_SCAN command with the proper tlv
- * for a bunch of channels.
- */
-static int lbs_do_scan(struct lbs_private *priv, uint8_t bsstype,
- struct chanscanparamset *chan_list, int chan_count)
-{
- int ret = -ENOMEM;
- struct cmd_ds_802_11_scan *scan_cmd;
- uint8_t *tlv; /* pointer into our current, growing TLV storage area */
-
- lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d",
- bsstype, chan_list ? chan_list[0].channumber : -1,
- chan_count);
-
- /* create the fixed part for scan command */
- scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL);
- if (scan_cmd == NULL)
- goto out;
-
- tlv = scan_cmd->tlvbuffer;
- /* TODO: do we need to scan for a specific BSSID?
- memcpy(scan_cmd->bssid, priv->scan_bssid, ETH_ALEN); */
- scan_cmd->bsstype = bsstype;
-
- /* add TLVs */
- if (priv->scan_ssid_len)
- tlv += lbs_scan_add_ssid_tlv(priv, tlv);
- if (chan_list && chan_count)
- tlv += lbs_scan_add_chanlist_tlv(tlv, chan_list, chan_count);
- tlv += lbs_scan_add_rates_tlv(tlv);
-
- /* This is the final data we are about to send */
- scan_cmd->hdr.size = cpu_to_le16(tlv - (uint8_t *)scan_cmd);
- lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd,
- sizeof(*scan_cmd));
- lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer,
- tlv - scan_cmd->tlvbuffer);
-
- ret = __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr,
- le16_to_cpu(scan_cmd->hdr.size),
- lbs_ret_80211_scan, 0);
-
-out:
- kfree(scan_cmd);
- lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief Internal function used to start a scan based on an input config
- *
- * Use the input user scan configuration information when provided in
- * order to send the appropriate scan commands to firmware to populate or
- * update the internal driver scan table
- *
- * @param priv A pointer to struct lbs_private structure
- * @param full_scan Do a full-scan (blocking)
- *
- * @return 0 or < 0 if error
- */
-int lbs_scan_networks(struct lbs_private *priv, int full_scan)
-{
- int ret = -ENOMEM;
- struct chanscanparamset *chan_list;
- struct chanscanparamset *curr_chans;
- int chan_count;
- uint8_t bsstype = CMD_BSS_TYPE_ANY;
- int numchannels = MRVDRV_CHANNELS_PER_SCAN_CMD;
- union iwreq_data wrqu;
-#ifdef CONFIG_LIBERTAS_DEBUG
- struct bss_descriptor *iter;
- int i = 0;
- DECLARE_SSID_BUF(ssid);
-#endif
-
- lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan);
-
- /* Cancel any outstanding partial scans if this scan
- * is a full scan.
- */
- if (full_scan && delayed_work_pending(&priv->scan_work))
- cancel_delayed_work(&priv->scan_work);
-
- /* User-specified bsstype or channel list
- TODO: this can be implemented if some user-space application
- needs the feature. Formerly, it was accessible from debugfs,
- but was never used.
- if (user_cfg) {
- if (user_cfg->bsstype)
- bsstype = user_cfg->bsstype;
- } */
-
- lbs_deb_scan("numchannels %d, bsstype %d\n", numchannels, bsstype);
-
- /* Create list of channels to scan */
- chan_list = kzalloc(sizeof(struct chanscanparamset) *
- LBS_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL);
- if (!chan_list) {
- lbs_pr_alert("SCAN: chan_list empty\n");
- goto out;
- }
-
- /* We want to scan all channels */
- chan_count = lbs_scan_create_channel_list(priv, chan_list);
-
- netif_stop_queue(priv->dev);
- if (priv->mesh_dev)
- netif_stop_queue(priv->mesh_dev);
-
- /* Prepare to continue an interrupted scan */
- lbs_deb_scan("chan_count %d, scan_channel %d\n",
- chan_count, priv->scan_channel);
- curr_chans = chan_list;
- /* advance channel list by already-scanned-channels */
- if (priv->scan_channel > 0) {
- curr_chans += priv->scan_channel;
- chan_count -= priv->scan_channel;
- }
-
- /* Send scan command(s)
- * numchannels contains the number of channels we should maximally scan
- * chan_count is the total number of channels to scan
- */
-
- while (chan_count) {
- int to_scan = min(numchannels, chan_count);
- lbs_deb_scan("scanning %d of %d channels\n",
- to_scan, chan_count);
- ret = lbs_do_scan(priv, bsstype, curr_chans,
- to_scan);
- if (ret) {
- lbs_pr_err("SCAN_CMD failed\n");
- goto out2;
- }
- curr_chans += to_scan;
- chan_count -= to_scan;
-
- /* somehow schedule the next part of the scan */
- if (chan_count && !full_scan &&
- !priv->surpriseremoved) {
- /* -1 marks just that we're currently scanning */
- if (priv->scan_channel < 0)
- priv->scan_channel = to_scan;
- else
- priv->scan_channel += to_scan;
- cancel_delayed_work(&priv->scan_work);
- queue_delayed_work(priv->work_thread, &priv->scan_work,
- msecs_to_jiffies(300));
- /* skip over GIWSCAN event */
- goto out;
- }
-
- }
- memset(&wrqu, 0, sizeof(union iwreq_data));
- wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
-
-#ifdef CONFIG_LIBERTAS_DEBUG
- /* Dump the scan table */
- mutex_lock(&priv->lock);
- lbs_deb_scan("scan table:\n");
- list_for_each_entry(iter, &priv->network_list, list)
- lbs_deb_scan("%02d: BSSID %pM, RSSI %d, SSID '%s'\n",
- i++, iter->bssid, iter->rssi,
- print_ssid(ssid, iter->ssid, iter->ssid_len));
- mutex_unlock(&priv->lock);
-#endif
-
-out2:
- priv->scan_channel = 0;
-
-out:
- if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
- netif_wake_queue(priv->dev);
-
- if (priv->mesh_dev && lbs_mesh_connected(priv) &&
- !priv->tx_pending_len)
- netif_wake_queue(priv->mesh_dev);
-
- kfree(chan_list);
-
- lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
- return ret;
-}
-
-void lbs_scan_worker(struct work_struct *work)
-{
- struct lbs_private *priv =
- container_of(work, struct lbs_private, scan_work.work);
-
- lbs_deb_enter(LBS_DEB_SCAN);
- lbs_scan_networks(priv, 0);
- lbs_deb_leave(LBS_DEB_SCAN);
-}
-
-
-/*********************************************************************/
-/* */
-/* Result interpretation */
-/* */
-/*********************************************************************/
-
-/**
- * @brief Interpret a BSS scan response returned from the firmware
- *
- * Parse the various fixed fields and IEs passed back for a BSS probe
- * response or beacon from the scan command. Record information as needed
- * in the scan table struct bss_descriptor for that entry.
- *
- * @param bss Output parameter: Pointer to the BSS Entry
- *
- * @return 0 or -1
- */
-static int lbs_process_bss(struct bss_descriptor *bss,
- uint8_t **pbeaconinfo, int *bytesleft)
-{
- struct ieee_ie_fh_param_set *fh;
- struct ieee_ie_ds_param_set *ds;
- struct ieee_ie_cf_param_set *cf;
- struct ieee_ie_ibss_param_set *ibss;
- DECLARE_SSID_BUF(ssid);
- uint8_t *pos, *end, *p;
- uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
- uint16_t beaconsize = 0;
- int ret;
-
- lbs_deb_enter(LBS_DEB_SCAN);
-
- if (*bytesleft >= sizeof(beaconsize)) {
- /* Extract & convert beacon size from the command buffer */
- beaconsize = get_unaligned_le16(*pbeaconinfo);
- *bytesleft -= sizeof(beaconsize);
- *pbeaconinfo += sizeof(beaconsize);
- }
-
- if (beaconsize == 0 || beaconsize > *bytesleft) {
- *pbeaconinfo += *bytesleft;
- *bytesleft = 0;
- ret = -1;
- goto done;
- }
-
- /* Initialize the current working beacon pointer for this BSS iteration */
- pos = *pbeaconinfo;
- end = pos + beaconsize;
-
- /* Advance the return beacon pointer past the current beacon */
- *pbeaconinfo += beaconsize;
- *bytesleft -= beaconsize;
-
- memcpy(bss->bssid, pos, ETH_ALEN);
- lbs_deb_scan("process_bss: BSSID %pM\n", bss->bssid);
- pos += ETH_ALEN;
-
- if ((end - pos) < 12) {
- lbs_deb_scan("process_bss: Not enough bytes left\n");
- ret = -1;
- goto done;
- }
-
- /*
- * next 4 fields are RSSI, time stamp, beacon interval,
- * and capability information
- */
-
- /* RSSI is 1 byte long */
- bss->rssi = *pos;
- lbs_deb_scan("process_bss: RSSI %d\n", *pos);
- pos++;
-
- /* time stamp is 8 bytes long */
- pos += 8;
-
- /* beacon interval is 2 bytes long */
- bss->beaconperiod = get_unaligned_le16(pos);
- pos += 2;
-
- /* capability information is 2 bytes long */
- bss->capability = get_unaligned_le16(pos);
- lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability);
- pos += 2;
-
- if (bss->capability & WLAN_CAPABILITY_PRIVACY)
- lbs_deb_scan("process_bss: WEP enabled\n");
- if (bss->capability & WLAN_CAPABILITY_IBSS)
- bss->mode = IW_MODE_ADHOC;
- else
- bss->mode = IW_MODE_INFRA;
-
- /* rest of the current buffer are IE's */
- lbs_deb_scan("process_bss: IE len %zd\n", end - pos);
- lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos);
-
- /* process variable IE */
- while (pos <= end - 2) {
- if (pos + pos[1] > end) {
- lbs_deb_scan("process_bss: error in processing IE, "
- "bytes left < IE length\n");
- break;
- }
-
- switch (pos[0]) {
- case WLAN_EID_SSID:
- bss->ssid_len = min_t(int, IEEE80211_MAX_SSID_LEN, pos[1]);
- memcpy(bss->ssid, pos + 2, bss->ssid_len);
- lbs_deb_scan("got SSID IE: '%s', len %u\n",
- print_ssid(ssid, bss->ssid, bss->ssid_len),
- bss->ssid_len);
- break;
-
- case WLAN_EID_SUPP_RATES:
- n_basic_rates = min_t(uint8_t, MAX_RATES, pos[1]);
- memcpy(bss->rates, pos + 2, n_basic_rates);
- got_basic_rates = 1;
- lbs_deb_scan("got RATES IE\n");
- break;
-
- case WLAN_EID_FH_PARAMS:
- fh = (struct ieee_ie_fh_param_set *) pos;
- memcpy(&bss->phy.fh, fh, sizeof(*fh));
- lbs_deb_scan("got FH IE\n");
- break;
-
- case WLAN_EID_DS_PARAMS:
- ds = (struct ieee_ie_ds_param_set *) pos;
- bss->channel = ds->channel;
- memcpy(&bss->phy.ds, ds, sizeof(*ds));
- lbs_deb_scan("got DS IE, channel %d\n", bss->channel);
- break;
-
- case WLAN_EID_CF_PARAMS:
- cf = (struct ieee_ie_cf_param_set *) pos;
- memcpy(&bss->ss.cf, cf, sizeof(*cf));
- lbs_deb_scan("got CF IE\n");
- break;
-
- case WLAN_EID_IBSS_PARAMS:
- ibss = (struct ieee_ie_ibss_param_set *) pos;
- bss->atimwindow = ibss->atimwindow;
- memcpy(&bss->ss.ibss, ibss, sizeof(*ibss));
- lbs_deb_scan("got IBSS IE\n");
- break;
-
- case WLAN_EID_EXT_SUPP_RATES:
- /* only process extended supported rate if data rate is
- * already found. Data rate IE should come before
- * extended supported rate IE
- */
- lbs_deb_scan("got RATESEX IE\n");
- if (!got_basic_rates) {
- lbs_deb_scan("... but ignoring it\n");
- break;
- }
-
- n_ex_rates = pos[1];
- if (n_basic_rates + n_ex_rates > MAX_RATES)
- n_ex_rates = MAX_RATES - n_basic_rates;
-
- p = bss->rates + n_basic_rates;
- memcpy(p, pos + 2, n_ex_rates);
- break;
-
- case WLAN_EID_GENERIC:
- if (pos[1] >= 4 &&
- pos[2] == 0x00 && pos[3] == 0x50 &&
- pos[4] == 0xf2 && pos[5] == 0x01) {
- bss->wpa_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN);
- memcpy(bss->wpa_ie, pos, bss->wpa_ie_len);
- lbs_deb_scan("got WPA IE\n");
- lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie,
- bss->wpa_ie_len);
- } else if (pos[1] >= MARVELL_MESH_IE_LENGTH &&
- pos[2] == 0x00 && pos[3] == 0x50 &&
- pos[4] == 0x43 && pos[5] == 0x04) {
- lbs_deb_scan("got mesh IE\n");
- bss->mesh = 1;
- } else {
- lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n",
- pos[2], pos[3],
- pos[4], pos[5],
- pos[1]);
- }
- break;
-
- case WLAN_EID_RSN:
- lbs_deb_scan("got RSN IE\n");
- bss->rsn_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN);
- memcpy(bss->rsn_ie, pos, bss->rsn_ie_len);
- lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE",
- bss->rsn_ie, bss->rsn_ie_len);
- break;
-
- default:
- lbs_deb_scan("got IE 0x%04x, len %d\n",
- pos[0], pos[1]);
- break;
- }
-
- pos += pos[1] + 2;
- }
-
- /* Timestamp */
- bss->last_scanned = jiffies;
- lbs_unset_basic_rate_flags(bss->rates, sizeof(bss->rates));
-
- ret = 0;
-
-done:
- lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief Send a scan command for all available channels filtered on a spec
- *
- * Used in association code and from debugfs
- *
- * @param priv A pointer to struct lbs_private structure
- * @param ssid A pointer to the SSID to scan for
- * @param ssid_len Length of the SSID
- *
- * @return 0-success, otherwise fail
- */
-int lbs_send_specific_ssid_scan(struct lbs_private *priv, uint8_t *ssid,
- uint8_t ssid_len)
-{
- DECLARE_SSID_BUF(ssid_buf);
- int ret = 0;
-
- lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s'\n",
- print_ssid(ssid_buf, ssid, ssid_len));
-
- if (!ssid_len)
- goto out;
-
- memcpy(priv->scan_ssid, ssid, ssid_len);
- priv->scan_ssid_len = ssid_len;
-
- lbs_scan_networks(priv, 1);
- if (priv->surpriseremoved) {
- ret = -1;
- goto out;
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
- return ret;
-}
-
-
-
-
-/*********************************************************************/
-/* */
-/* Support for Wireless Extensions */
-/* */
-/*********************************************************************/
-
-
-#define MAX_CUSTOM_LEN 64
-
-static inline char *lbs_translate_scan(struct lbs_private *priv,
- struct iw_request_info *info,
- char *start, char *stop,
- struct bss_descriptor *bss)
-{
- struct chan_freq_power *cfp;
- char *current_val; /* For rates */
- struct iw_event iwe; /* Temporary buffer */
- int j;
-#define PERFECT_RSSI ((uint8_t)50)
-#define WORST_RSSI ((uint8_t)0)
-#define RSSI_DIFF ((uint8_t)(PERFECT_RSSI - WORST_RSSI))
- uint8_t rssi;
-
- lbs_deb_enter(LBS_DEB_SCAN);
-
- cfp = lbs_find_cfp_by_band_and_channel(priv, 0, bss->channel);
- if (!cfp) {
- lbs_deb_scan("Invalid channel number %d\n", bss->channel);
- start = NULL;
- goto out;
- }
-
- /* First entry *MUST* be the BSSID */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, &bss->bssid, ETH_ALEN);
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
-
- /* SSID */
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN);
- start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
-
- /* Mode */
- iwe.cmd = SIOCGIWMODE;
- iwe.u.mode = bss->mode;
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
-
- /* Frequency */
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = (long)cfp->freq * 100000;
- iwe.u.freq.e = 1;
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
-
- /* Add quality statistics */
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.updated = IW_QUAL_ALL_UPDATED;
- iwe.u.qual.level = SCAN_RSSI(bss->rssi);
-
- rssi = iwe.u.qual.level - MRVDRV_NF_DEFAULT_SCAN_VALUE;
- iwe.u.qual.qual =
- (100 * RSSI_DIFF * RSSI_DIFF - (PERFECT_RSSI - rssi) *
- (15 * (RSSI_DIFF) + 62 * (PERFECT_RSSI - rssi))) /
- (RSSI_DIFF * RSSI_DIFF);
- if (iwe.u.qual.qual > 100)
- iwe.u.qual.qual = 100;
-
- if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) {
- iwe.u.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE;
- } else {
- iwe.u.qual.noise = CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]);
- }
-
- /* Locally created ad-hoc BSSs won't have beacons if this is the
- * only station in the adhoc network; so get signal strength
- * from receive statistics.
- */
- if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate
- && !lbs_ssid_cmp(priv->curbssparams.ssid,
- priv->curbssparams.ssid_len,
- bss->ssid, bss->ssid_len)) {
- int snr, nf;
- snr = priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE;
- nf = priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE;
- iwe.u.qual.level = CAL_RSSI(snr, nf);
- }
- start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
-
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- if (bss->capability & WLAN_CAPABILITY_PRIVACY) {
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- } else {
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- }
- iwe.u.data.length = 0;
- start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
-
- current_val = start + iwe_stream_lcp_len(info);
-
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = 0;
- iwe.u.bitrate.disabled = 0;
- iwe.u.bitrate.value = 0;
-
- for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) {
- /* Bit rate given in 500 kb/s units */
- iwe.u.bitrate.value = bss->rates[j] * 500000;
- current_val = iwe_stream_add_value(info, start, current_val,
- stop, &iwe, IW_EV_PARAM_LEN);
- }
- if ((bss->mode == IW_MODE_ADHOC) && priv->adhoccreate
- && !lbs_ssid_cmp(priv->curbssparams.ssid,
- priv->curbssparams.ssid_len,
- bss->ssid, bss->ssid_len)) {
- iwe.u.bitrate.value = 22 * 500000;
- current_val = iwe_stream_add_value(info, start, current_val,
- stop, &iwe, IW_EV_PARAM_LEN);
- }
- /* Check if we added any event */
- if ((current_val - start) > iwe_stream_lcp_len(info))
- start = current_val;
-
- memset(&iwe, 0, sizeof(iwe));
- if (bss->wpa_ie_len) {
- char buf[MAX_WPA_IE_LEN];
- memcpy(buf, bss->wpa_ie, bss->wpa_ie_len);
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = bss->wpa_ie_len;
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
-
- memset(&iwe, 0, sizeof(iwe));
- if (bss->rsn_ie_len) {
- char buf[MAX_WPA_IE_LEN];
- memcpy(buf, bss->rsn_ie, bss->rsn_ie_len);
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = bss->rsn_ie_len;
- start = iwe_stream_add_point(info, start, stop, &iwe, buf);
- }
-
- if (bss->mesh) {
- char custom[MAX_CUSTOM_LEN];
- char *p = custom;
-
- iwe.cmd = IWEVCUSTOM;
- p += snprintf(p, MAX_CUSTOM_LEN, "mesh-type: olpc");
- iwe.u.data.length = p - custom;
- if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop,
- &iwe, custom);
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_SCAN, "start %p", start);
- return start;
-}
-
-
-/**
- * @brief Handle Scan Network ioctl
- *
- * @param dev A pointer to net_device structure
- * @param info A pointer to iw_request_info structure
- * @param vwrq A pointer to iw_param structure
- * @param extra A pointer to extra data buf
- *
- * @return 0 --success, otherwise fail
- */
-int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- DECLARE_SSID_BUF(ssid);
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (!priv->radio_on) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!netif_running(dev)) {
- ret = -ENETDOWN;
- goto out;
- }
-
- /* mac80211 does this:
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->type != IEEE80211_IF_TYPE_xxx) {
- ret = -EOPNOTSUPP;
- goto out;
- }
- */
-
- if (wrqu->data.length == sizeof(struct iw_scan_req) &&
- wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- struct iw_scan_req *req = (struct iw_scan_req *)extra;
- priv->scan_ssid_len = req->essid_len;
- memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
- lbs_deb_wext("set_scan, essid '%s'\n",
- print_ssid(ssid, priv->scan_ssid, priv->scan_ssid_len));
- } else {
- priv->scan_ssid_len = 0;
- }
-
- if (!delayed_work_pending(&priv->scan_work))
- queue_delayed_work(priv->work_thread, &priv->scan_work,
- msecs_to_jiffies(50));
- /* set marker that currently a scan is taking place */
- priv->scan_channel = -1;
-
- if (priv->surpriseremoved)
- ret = -EIO;
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-
-/**
- * @brief Handle Retrieve scan table ioctl
- *
- * @param dev A pointer to net_device structure
- * @param info A pointer to iw_request_info structure
- * @param dwrq A pointer to iw_point structure
- * @param extra A pointer to extra data buf
- *
- * @return 0 --success, otherwise fail
- */
-int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
-#define SCAN_ITEM_SIZE 128
- struct lbs_private *priv = dev->ml_priv;
- int err = 0;
- char *ev = extra;
- char *stop = ev + dwrq->length;
- struct bss_descriptor *iter_bss;
- struct bss_descriptor *safe;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- /* iwlist should wait until the current scan is finished */
- if (priv->scan_channel)
- return -EAGAIN;
-
- /* Update RSSI if current BSS is a locally created ad-hoc BSS */
- if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) {
- err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
- CMD_OPTION_WAITFORRSP, 0, NULL);
- if (err)
- goto out;
- }
-
- mutex_lock(&priv->lock);
- list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
- char *next_ev;
- unsigned long stale_time;
-
- if (stop - ev < SCAN_ITEM_SIZE) {
- err = -E2BIG;
- break;
- }
-
- /* For mesh device, list only mesh networks */
- if (dev == priv->mesh_dev && !iter_bss->mesh)
- continue;
-
- /* Prune an old scan result */
- stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE;
- if (time_after(jiffies, stale_time)) {
- list_move_tail(&iter_bss->list, &priv->network_free_list);
- clear_bss_descriptor(iter_bss);
- continue;
- }
-
- /* Translate to WE format this entry */
- next_ev = lbs_translate_scan(priv, info, ev, stop, iter_bss);
- if (next_ev == NULL)
- continue;
- ev = next_ev;
- }
- mutex_unlock(&priv->lock);
-
- dwrq->length = (ev - extra);
- dwrq->flags = 0;
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
- return err;
-}
-
-
-
-
-/*********************************************************************/
-/* */
-/* Command execution */
-/* */
-/*********************************************************************/
-
-
-/**
- * @brief This function handles the command response of scan
- *
- * Called from handle_cmd_response() in cmdresp.c.
- *
- * The response buffer for the scan command has the following
- * memory layout:
- *
- * .-----------------------------------------------------------.
- * | header (4 * sizeof(u16)): Standard command response hdr |
- * .-----------------------------------------------------------.
- * | bufsize (u16) : sizeof the BSS Description data |
- * .-----------------------------------------------------------.
- * | NumOfSet (u8) : Number of BSS Descs returned |
- * .-----------------------------------------------------------.
- * | BSSDescription data (variable, size given in bufsize) |
- * .-----------------------------------------------------------.
- * | TLV data (variable, size calculated using header->size, |
- * | bufsize and sizeof the fixed fields above) |
- * .-----------------------------------------------------------.
- *
- * @param priv A pointer to struct lbs_private structure
- * @param resp A pointer to cmd_ds_command
- *
- * @return 0 or -1
- */
-static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
- struct cmd_header *resp)
-{
- struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
- struct bss_descriptor *iter_bss;
- struct bss_descriptor *safe;
- uint8_t *bssinfo;
- uint16_t scanrespsize;
- int bytesleft;
- int idx;
- int tlvbufsize;
- int ret;
-
- lbs_deb_enter(LBS_DEB_SCAN);
-
- /* Prune old entries from scan table */
- list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
- unsigned long stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE;
- if (time_before(jiffies, stale_time))
- continue;
- list_move_tail (&iter_bss->list, &priv->network_free_list);
- clear_bss_descriptor(iter_bss);
- }
-
- if (scanresp->nr_sets > MAX_NETWORK_COUNT) {
- lbs_deb_scan("SCAN_RESP: too many scan results (%d, max %d)\n",
- scanresp->nr_sets, MAX_NETWORK_COUNT);
- ret = -1;
- goto done;
- }
-
- bytesleft = get_unaligned_le16(&scanresp->bssdescriptsize);
- lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft);
-
- scanrespsize = le16_to_cpu(resp->size);
- lbs_deb_scan("SCAN_RESP: scan results %d\n", scanresp->nr_sets);
-
- bssinfo = scanresp->bssdesc_and_tlvbuffer;
-
- /* The size of the TLV buffer is equal to the entire command response
- * size (scanrespsize) minus the fixed fields (sizeof()'s), the
- * BSS Descriptions (bssdescriptsize as bytesleft) and the command
- * response header (sizeof(struct cmd_header))
- */
- tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
- + sizeof(scanresp->nr_sets)
- + sizeof(struct cmd_header));
-
- /*
- * Process each scan response returned (scanresp->nr_sets). Save
- * the information in the newbssentry and then insert into the
- * driver scan table either as an update to an existing entry
- * or as an addition at the end of the table
- */
- for (idx = 0; idx < scanresp->nr_sets && bytesleft; idx++) {
- struct bss_descriptor new;
- struct bss_descriptor *found = NULL;
- struct bss_descriptor *oldest = NULL;
-
- /* Process the data fields and IEs returned for this BSS */
- memset(&new, 0, sizeof (struct bss_descriptor));
- if (lbs_process_bss(&new, &bssinfo, &bytesleft) != 0) {
-			/* error parsing the scan response; skip this entry */
- lbs_deb_scan("SCAN_RESP: process_bss returned ERROR\n");
- continue;
- }
-
- /* Try to find this bss in the scan table */
- list_for_each_entry (iter_bss, &priv->network_list, list) {
- if (is_same_network(iter_bss, &new)) {
- found = iter_bss;
- break;
- }
-
- if ((oldest == NULL) ||
- (iter_bss->last_scanned < oldest->last_scanned))
- oldest = iter_bss;
- }
-
- if (found) {
- /* found, clear it */
- clear_bss_descriptor(found);
- } else if (!list_empty(&priv->network_free_list)) {
- /* Pull one from the free list */
- found = list_entry(priv->network_free_list.next,
- struct bss_descriptor, list);
- list_move_tail(&found->list, &priv->network_list);
- } else if (oldest) {
- /* If there are no more slots, expire the oldest */
- found = oldest;
- clear_bss_descriptor(found);
- list_move_tail(&found->list, &priv->network_list);
- } else {
- continue;
- }
-
- lbs_deb_scan("SCAN_RESP: BSSID %pM\n", new.bssid);
-
- /* Copy the locally created newbssentry to the scan table */
- memcpy(found, &new, offsetof(struct bss_descriptor, list));
- }
-
- ret = 0;
-
-done:
- lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
- return ret;
-}
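
A minimal standalone sketch of the TLV-size arithmetic that lbs_ret_80211_scan()
performs, using illustrative names rather than the driver's own structures
(the sizes follow the layout comment above: a 4*u16 command response header,
a u16 bufsize field and a u8 NumOfSet field precede the BSS descriptors, and
whatever is left of the response is TLV data):

#include <stddef.h>
#include <stdint.h>

static size_t scan_rsp_tlv_len(size_t total_rsp_size,	/* le16 resp->size */
			       size_t bss_desc_bytes)	/* bufsize field   */
{
	const size_t fixed = 4 * sizeof(uint16_t)	/* command response hdr */
			   + sizeof(uint16_t)		/* bufsize (u16)        */
			   + sizeof(uint8_t);		/* NumOfSet (u8)        */

	return total_rsp_size - fixed - bss_desc_bytes;
}

For a 200-byte response carrying 150 bytes of BSS descriptors this leaves
200 - 11 - 150 = 39 bytes of TLVs, matching the tlvbufsize expression above.
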
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
deleted file mode 100644
index 8fb1706..0000000
--- a/drivers/net/wireless/libertas/scan.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Interface for the wlan network scan routines
- *
- * Driver interface functions and type declarations for the scan module
- * implemented in scan.c.
- */
-#ifndef _LBS_SCAN_H
-#define _LBS_SCAN_H
-
-#include <net/iw_handler.h>
-
-struct lbs_private;
-
-#define MAX_NETWORK_COUNT 128
-
-/** Chan-freq-TxPower mapping table*/
-struct chan_freq_power {
- /** channel Number */
- u16 channel;
- /** frequency of this channel */
- u32 freq;
- /** Max allowed Tx power level */
- u16 maxtxpower;
-	/** TRUE: channel unsupported; FALSE: supported */
- u8 unsupported;
-};
-
-/** region-band mapping table*/
-struct region_channel {
- /** TRUE if this entry is valid */
- u8 valid;
- /** region code for US, Japan ... */
- u8 region;
- /** band B/G/A, used for BAND_CONFIG cmd */
- u8 band;
- /** Actual No. of elements in the array below */
- u8 nrcfp;
- /** chan-freq-txpower mapping table*/
- struct chan_freq_power *CFP;
-};
-
-/**
- * @brief Maximum number of channels that can be sent in a setuserscan ioctl
- */
-#define LBS_IOCTL_USER_SCAN_CHAN_MAX 50
-
-int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
-
-int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
-
-int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
- u8 ssid_len);
-
-int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra);
-int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-int lbs_scan_networks(struct lbs_private *priv, int full_scan);
-
-void lbs_scan_worker(struct work_struct *work);
-
-#endif
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index a9bf658..411a3bb 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -4,13 +4,13 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
+#include <net/cfg80211.h>
#include "host.h"
#include "radiotap.h"
#include "decl.h"
#include "defs.h"
#include "dev.h"
-#include "wext.h"
/**
* @brief This function converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE
@@ -111,7 +111,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
p802x_hdr = skb->data;
pkt_len = skb->len;
- if (dev == priv->rtap_net_dev) {
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data;
/* set txpd fields from the radiotap header */
@@ -147,7 +147,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- if (priv->monitormode) {
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
/* Keep the skb to echo it back once Tx feedback is
received from FW */
skb_orphan(skb);
@@ -158,6 +158,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
free:
dev_kfree_skb_any(skb);
}
+
unlock:
spin_unlock_irqrestore(&priv->driver_lock, flags);
wake_up(&priv->waitq);
@@ -179,7 +180,8 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
{
struct tx_radiotap_hdr *radiotap_hdr;
- if (!priv->monitormode || priv->currenttxskb == NULL)
+	if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR ||
+ priv->currenttxskb == NULL)
return;
radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data;
@@ -188,7 +190,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
(1 + priv->txretrycount - try_count) : 0;
priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb,
- priv->rtap_net_dev);
+ priv->dev);
netif_rx(priv->currenttxskb);
priv->currenttxskb = NULL;
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
deleted file mode 100644
index f96a960..0000000
--- a/drivers/net/wireless/libertas/wext.c
+++ /dev/null
@@ -1,2353 +0,0 @@
-/**
- * This file contains ioctl functions
- */
-#include <linux/ctype.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/if.h>
-#include <linux/if_arp.h>
-#include <linux/wireless.h>
-#include <linux/bitops.h>
-
-#include <net/lib80211.h>
-#include <net/iw_handler.h>
-
-#include "host.h"
-#include "radiotap.h"
-#include "decl.h"
-#include "defs.h"
-#include "dev.h"
-#include "wext.h"
-#include "scan.h"
-#include "assoc.h"
-#include "cmd.h"
-
-
-static inline void lbs_postpone_association_work(struct lbs_private *priv)
-{
- if (priv->surpriseremoved)
- return;
- cancel_delayed_work(&priv->assoc_work);
- queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2);
-}
-
-static inline void lbs_do_association_work(struct lbs_private *priv)
-{
- if (priv->surpriseremoved)
- return;
- cancel_delayed_work(&priv->assoc_work);
- queue_delayed_work(priv->work_thread, &priv->assoc_work, 0);
-}
-
-static inline void lbs_cancel_association_work(struct lbs_private *priv)
-{
- cancel_delayed_work(&priv->assoc_work);
- kfree(priv->pending_assoc_req);
- priv->pending_assoc_req = NULL;
-}
-
-void lbs_send_disconnect_notification(struct lbs_private *priv)
-{
- union iwreq_data wrqu;
-
- memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-}
-
-static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
-{
- union iwreq_data iwrq;
- u8 buf[50];
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- memset(&iwrq, 0, sizeof(union iwreq_data));
- memset(buf, 0, sizeof(buf));
-
- snprintf(buf, sizeof(buf) - 1, "%s", str);
-
- iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
-
- /* Send Event to upper layer */
- lbs_deb_wext("event indication string %s\n", (char *)buf);
- lbs_deb_wext("event indication length %d\n", iwrq.data.length);
- lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
-
- wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
-
- lbs_deb_leave(LBS_DEB_WEXT);
-}
-
-/**
- * @brief This function handles MIC failure event.
- *
- * @param priv A pointer to struct lbs_private structure
- * @param event the event id
- * @return n/a
- */
-void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
-{
- char buf[50];
-
- lbs_deb_enter(LBS_DEB_CMD);
- memset(buf, 0, sizeof(buf));
-
- sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
-
- if (event == MACREG_INT_CODE_MIC_ERR_UNICAST)
- strcat(buf, "unicast ");
- else
- strcat(buf, "multicast ");
-
- lbs_send_iwevcustom_event(priv, buf);
- lbs_deb_leave(LBS_DEB_CMD);
-}
-
-/**
- * @brief Find the channel frequency power info with specific channel
- *
- * @param priv A pointer to struct lbs_private structure
- * @param band it can be BAND_A, BAND_G or BAND_B
- * @param channel the channel to look up
- * @return A pointer to struct chan_freq_power structure or NULL if not found.
- */
-struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
- struct lbs_private *priv,
- u8 band,
- u16 channel)
-{
- struct chan_freq_power *cfp = NULL;
- struct region_channel *rc;
- int i, j;
-
- for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
- rc = &priv->region_channel[j];
-
- if (!rc->valid || !rc->CFP)
- continue;
- if (rc->band != band)
- continue;
- for (i = 0; i < rc->nrcfp; i++) {
- if (rc->CFP[i].channel == channel) {
- cfp = &rc->CFP[i];
- break;
- }
- }
- }
-
- if (!cfp && channel)
- lbs_deb_wext("lbs_find_cfp_by_band_and_channel: can't find "
- "cfp by band %d / channel %d\n", band, channel);
-
- return cfp;
-}
-
-/**
- * @brief Find the channel frequency power info with specific frequency
- *
- * @param priv A pointer to struct lbs_private structure
- * @param band it can be BAND_A, BAND_G or BAND_B
- * @param freq the frequency to look up
- * @return A pointer to struct chan_freq_power structure or NULL if not found.
- */
-static struct chan_freq_power *find_cfp_by_band_and_freq(
- struct lbs_private *priv,
- u8 band,
- u32 freq)
-{
- struct chan_freq_power *cfp = NULL;
- struct region_channel *rc;
- int i, j;
-
- for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
- rc = &priv->region_channel[j];
-
- if (!rc->valid || !rc->CFP)
- continue;
- if (rc->band != band)
- continue;
- for (i = 0; i < rc->nrcfp; i++) {
- if (rc->CFP[i].freq == freq) {
- cfp = &rc->CFP[i];
- break;
- }
- }
- }
-
- if (!cfp && freq)
-		lbs_deb_wext("find_cfp_by_band_and_freq: can't find cfp by "
- "band %d / freq %d\n", band, freq);
-
- return cfp;
-}
-
-/**
- * @brief Copy active data rates based on adapter mode and status
- *
- * @param priv A pointer to struct lbs_private structure
- * @param rate The buf to return the active rates
- */
-static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
-{
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if ((priv->connect_status != LBS_CONNECTED) &&
- !lbs_mesh_connected(priv))
- memcpy(rates, lbs_bg_rates, MAX_RATES);
- else
- memcpy(rates, priv->curbssparams.rates, MAX_RATES);
-
- lbs_deb_leave(LBS_DEB_WEXT);
-}
-
-static int lbs_get_name(struct net_device *dev, struct iw_request_info *info,
- char *cwrq, char *extra)
-{
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- /* We could add support for 802.11n here as needed. Jean II */
- snprintf(cwrq, IFNAMSIZ, "IEEE 802.11b/g");
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- struct chan_freq_power *cfp;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- cfp = lbs_find_cfp_by_band_and_channel(priv, 0,
- priv->channel);
-
- if (!cfp) {
- if (priv->channel)
- lbs_deb_wext("invalid channel %d\n",
- priv->channel);
- return -EINVAL;
- }
-
- fwrq->m = (long)cfp->freq * 100000;
- fwrq->e = 1;
-
- lbs_deb_wext("freq %u\n", fwrq->m);
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
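
The (m, e) pair that lbs_get_freq() fills in encodes frequency as m * 10^e Hz.
Assuming the usual MHz-valued CFP table entries (which the 100000 scaling
factor implies), channel 6 works out as in this small standalone sketch:

#include <stdio.h>

int main(void)
{
	long freq_mhz = 2437;		/* e.g. channel 6 in the CFP table */
	long m = freq_mhz * 100000;	/* what lbs_get_freq() reports     */
	int e = 1;
	long long hz = m;

	for (int i = 0; i < e; i++)	/* iw_freq value in Hz is m * 10^e */
		hz *= 10;

	printf("%lld Hz\n", hz);	/* 2437000000 Hz == 2.437 GHz */
	return 0;
}
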
-
-static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info,
- struct sockaddr *awrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (priv->connect_status == LBS_CONNECTED) {
- memcpy(awrq->sa_data, priv->curbssparams.bssid, ETH_ALEN);
- } else {
- memset(awrq->sa_data, 0, ETH_ALEN);
- }
- awrq->sa_family = ARPHRD_ETHER;
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- /*
- * Check the size of the string
- */
-
- if (dwrq->length > 16) {
- return -E2BIG;
- }
-
- mutex_lock(&priv->lock);
- memset(priv->nodename, 0, sizeof(priv->nodename));
- memcpy(priv->nodename, extra, dwrq->length);
- mutex_unlock(&priv->lock);
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- dwrq->length = strlen(priv->nodename);
- memcpy(extra, priv->nodename, dwrq->length);
- extra[dwrq->length] = '\0';
-
- dwrq->flags = 1; /* active */
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-#ifdef CONFIG_LIBERTAS_MESH
-static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- /* Use nickname to indicate that mesh is on */
-
- if (lbs_mesh_connected(priv)) {
- strncpy(extra, "Mesh", 12);
- extra[12] = '\0';
- dwrq->length = strlen(extra);
- }
-
- else {
- extra[0] = '\0';
- dwrq->length = 0;
- }
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-#endif
-
-static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int ret = 0;
- struct lbs_private *priv = dev->ml_priv;
- u32 val = vwrq->value;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (vwrq->disabled)
- val = MRVDRV_RTS_MAX_VALUE;
-
- if (val > MRVDRV_RTS_MAX_VALUE) /* min rts value is 0 */
- return -EINVAL;
-
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, (u16) val);
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
- u16 val = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &val);
- if (ret)
- goto out;
-
- vwrq->value = val;
- vwrq->disabled = val > MRVDRV_RTS_MAX_VALUE; /* min rts value is 0 */
- vwrq->fixed = 1;
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
- u32 val = vwrq->value;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (vwrq->disabled)
- val = MRVDRV_FRAG_MAX_VALUE;
-
- if (val < MRVDRV_FRAG_MIN_VALUE || val > MRVDRV_FRAG_MAX_VALUE)
- return -EINVAL;
-
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, (u16) val);
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
- u16 val = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, &val);
- if (ret)
- goto out;
-
- vwrq->value = val;
- vwrq->disabled = ((val < MRVDRV_FRAG_MIN_VALUE)
- || (val > MRVDRV_FRAG_MAX_VALUE));
- vwrq->fixed = 1;
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_get_mode(struct net_device *dev,
- struct iw_request_info *info, u32 * uwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- *uwrq = priv->mode;
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-#ifdef CONFIG_LIBERTAS_MESH
-static int mesh_wlan_get_mode(struct net_device *dev,
- struct iw_request_info *info, u32 * uwrq,
- char *extra)
-{
- lbs_deb_enter(LBS_DEB_WEXT);
-
- *uwrq = IW_MODE_REPEAT;
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-#endif
-
-static int lbs_get_txpow(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- s16 curlevel = 0;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (!priv->radio_on) {
- lbs_deb_wext("tx power off\n");
- vwrq->value = 0;
- vwrq->disabled = 1;
- goto out;
- }
-
- ret = lbs_get_tx_power(priv, &curlevel, NULL, NULL);
- if (ret)
- goto out;
-
- lbs_deb_wext("tx power level %d dbm\n", curlevel);
- priv->txpower_cur = curlevel;
-
- vwrq->value = curlevel;
- vwrq->fixed = 1;
- vwrq->disabled = 0;
- vwrq->flags = IW_TXPOW_DBM;
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
- u16 slimit = 0, llimit = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if ((vwrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
- return -EOPNOTSUPP;
-
- /* The MAC has a 4-bit Total_Tx_Count register
- Total_Tx_Count = 1 + Tx_Retry_Count */
-#define TX_RETRY_MIN 0
-#define TX_RETRY_MAX 14
- if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX)
- return -EINVAL;
-
- /* Add 1 to convert retry count to try count */
- if (vwrq->flags & IW_RETRY_SHORT)
- slimit = (u16) (vwrq->value + 1);
- else if (vwrq->flags & IW_RETRY_LONG)
- llimit = (u16) (vwrq->value + 1);
- else
- slimit = llimit = (u16) (vwrq->value + 1); /* set both */
-
- if (llimit) {
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT,
- llimit);
- if (ret)
- goto out;
- }
-
- if (slimit) {
- /* txretrycount follows the short retry limit */
- priv->txretrycount = slimit;
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT,
- slimit);
- if (ret)
- goto out;
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
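
The +1 in lbs_set_retry() turns the user's retry count into the firmware's try
count, which has to fit the 4-bit Total_Tx_Count register described in the
comment above. A quick standalone illustration of the round trip:

#include <assert.h>

int main(void)
{
	unsigned retry = 7;		/* what "iwconfig ethX retry 7" asks for  */
	unsigned try_count = retry + 1;	/* what the firmware is programmed with   */

	assert(try_count <= 15);	/* fits the 4-bit Total_Tx_Count register */
	assert(try_count - 1 == retry);	/* lbs_get_retry() undoes the conversion  */
	return 0;
}
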
-
-static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
- u16 val = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- vwrq->disabled = 0;
-
- if (vwrq->flags & IW_RETRY_LONG) {
- ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, &val);
- if (ret)
- goto out;
-
- /* Subtract 1 to convert try count to retry count */
- vwrq->value = val - 1;
- vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- } else {
- ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, &val);
- if (ret)
- goto out;
-
- /* txretry count follows the short retry limit */
- priv->txretrycount = val;
- /* Subtract 1 to convert try count to retry count */
- vwrq->value = val - 1;
- vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
- }
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static inline void sort_channels(struct iw_freq *freq, int num)
-{
- int i, j;
- struct iw_freq temp;
-
- for (i = 0; i < num; i++)
- for (j = i + 1; j < num; j++)
- if (freq[i].i > freq[j].i) {
- temp.i = freq[i].i;
- temp.m = freq[i].m;
-
- freq[i].i = freq[j].i;
- freq[i].m = freq[j].m;
-
- freq[j].i = temp.i;
- freq[j].m = temp.m;
- }
-}
-
-/* data rate listing
- MULTI_BANDS:
- abg a b b/g
- Infra G(12) A(8) B(4) G(12)
- Adhoc A+B(12) A(8) B(4) B(4)
-
- non-MULTI_BANDS:
- b b/g
- Infra B(4) G(12)
- Adhoc B(4) B(4)
- */
-/**
- * @brief Get Range Info
- *
- * @param dev A pointer to net_device structure
- * @param info A pointer to iw_request_info structure
- * @param dwrq A pointer to iw_point structure
- * @param extra A pointer to extra data buf
- * @return 0 on success, otherwise fail
- */
-static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- int i, j;
- struct lbs_private *priv = dev->ml_priv;
- struct iw_range *range = (struct iw_range *)extra;
- struct chan_freq_power *cfp;
- u8 rates[MAX_RATES + 1];
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- dwrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(struct iw_range));
-
- range->min_nwid = 0;
- range->max_nwid = 0;
-
- memset(rates, 0, sizeof(rates));
- copy_active_data_rates(priv, rates);
- range->num_bitrates = strnlen(rates, IW_MAX_BITRATES);
- for (i = 0; i < range->num_bitrates; i++)
- range->bitrate[i] = rates[i] * 500000;
- range->num_bitrates = i;
- lbs_deb_wext("IW_MAX_BITRATES %d, num_bitrates %d\n", IW_MAX_BITRATES,
- range->num_bitrates);
-
- range->num_frequency = 0;
-
- range->scan_capa = IW_SCAN_CAPA_ESSID;
-
- for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
- && (j < ARRAY_SIZE(priv->region_channel)); j++) {
- cfp = priv->region_channel[j].CFP;
- for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
- && priv->region_channel[j].valid
- && cfp
- && (i < priv->region_channel[j].nrcfp); i++) {
- range->freq[range->num_frequency].i =
- (long)cfp->channel;
- range->freq[range->num_frequency].m =
- (long)cfp->freq * 100000;
- range->freq[range->num_frequency].e = 1;
- cfp++;
- range->num_frequency++;
- }
- }
-
- lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n",
- IW_MAX_FREQUENCIES, range->num_frequency);
-
- range->num_channels = range->num_frequency;
-
- sort_channels(&range->freq[0], range->num_frequency);
-
- /*
- * Set an indication of the max TCP throughput in bit/s that we can
- * expect using this interface
- */
- if (i > 2)
- range->throughput = 5000 * 1000;
- else
- range->throughput = 1500 * 1000;
-
- range->min_rts = MRVDRV_RTS_MIN_VALUE;
- range->max_rts = MRVDRV_RTS_MAX_VALUE;
- range->min_frag = MRVDRV_FRAG_MIN_VALUE;
- range->max_frag = MRVDRV_FRAG_MAX_VALUE;
-
- range->encoding_size[0] = 5;
- range->encoding_size[1] = 13;
- range->num_encoding_sizes = 2;
- range->max_encoding_tokens = 4;
-
- /*
- * Right now we support only "iwconfig ethX power on|off"
- */
- range->pm_capa = IW_POWER_ON;
-
- /*
- * Minimum version we recommend
- */
- range->we_version_source = 15;
-
- /*
- * Version we are compiled with
- */
- range->we_version_compiled = WIRELESS_EXT;
-
- range->retry_capa = IW_RETRY_LIMIT;
- range->retry_flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
-
- range->min_retry = TX_RETRY_MIN;
- range->max_retry = TX_RETRY_MAX;
-
- /*
- * Set the qual, level and noise range values
- */
- range->max_qual.qual = 100;
- range->max_qual.level = 0;
- range->max_qual.noise = 0;
- range->max_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-
- range->avg_qual.qual = 70;
- /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
- range->avg_qual.level = 0;
- range->avg_qual.noise = 0;
- range->avg_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-
- range->sensitivity = 0;
-
- /* Setup the supported power level ranges */
- memset(range->txpower, 0, sizeof(range->txpower));
- range->txpower_capa = IW_TXPOW_DBM | IW_TXPOW_RANGE;
- range->txpower[0] = priv->txpower_min;
- range->txpower[1] = priv->txpower_max;
- range->num_txpower = 2;
-
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
-
- if (priv->fwcapinfo & FW_CAPINFO_WPA) {
- range->enc_capa = IW_ENC_CAPA_WPA
- | IW_ENC_CAPA_WPA2
- | IW_ENC_CAPA_CIPHER_TKIP
- | IW_ENC_CAPA_CIPHER_CCMP;
- }
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (!(priv->fwcapinfo & FW_CAPINFO_PS)) {
- if (vwrq->disabled)
- return 0;
- else
- return -EINVAL;
- }
-
- /* PS is currently supported only in Infrastructure mode
- * Remove this check if it is to be supported in IBSS mode also
- */
-
- if (vwrq->disabled) {
- priv->psmode = LBS802_11POWERMODECAM;
- if (priv->psstate != PS_STATE_FULL_POWER) {
- lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
- }
-
- return 0;
- }
-
- if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- lbs_deb_wext(
- "setting power timeout is not supported\n");
- return -EINVAL;
- } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
- vwrq->value = vwrq->value / 1000;
- if (!priv->enter_deep_sleep) {
- lbs_pr_err("deep sleep feature is not implemented "
- "for this interface driver\n");
- return -EINVAL;
- }
-
- if (priv->connect_status == LBS_CONNECTED) {
- if ((priv->is_auto_deep_sleep_enabled) &&
- (vwrq->value == -1000)) {
- lbs_exit_auto_deep_sleep(priv);
- return 0;
- } else {
- lbs_pr_err("can't use deep sleep cmd in "
- "connected state\n");
- return -EINVAL;
- }
- }
-
- if ((vwrq->value < 0) && (vwrq->value != -1000)) {
- lbs_pr_err("unknown option\n");
- return -EINVAL;
- }
-
- if (vwrq->value > 0) {
- if (!priv->is_auto_deep_sleep_enabled) {
- priv->is_activity_detected = 0;
- priv->auto_deep_sleep_timeout = vwrq->value;
- lbs_enter_auto_deep_sleep(priv);
- } else {
- priv->auto_deep_sleep_timeout = vwrq->value;
- lbs_deb_debugfs("auto deep sleep: "
- "already enabled\n");
- }
- return 0;
- } else {
- if (priv->is_auto_deep_sleep_enabled) {
- lbs_exit_auto_deep_sleep(priv);
-				/* Try to exit deep sleep if auto
-				 * deep sleep has been disabled */
- ret = lbs_set_deep_sleep(priv, 0);
- }
- if (vwrq->value == 0)
- ret = lbs_set_deep_sleep(priv, 1);
- else if (vwrq->value == -1000)
- ret = lbs_set_deep_sleep(priv, 0);
- return ret;
- }
- }
-
- if (priv->psmode != LBS802_11POWERMODECAM) {
- return 0;
- }
-
- priv->psmode = LBS802_11POWERMODEMAX_PSP;
-
- if (priv->connect_status == LBS_CONNECTED) {
- lbs_ps_sleep(priv, CMD_OPTION_WAITFORRSP);
- }
-
- lbs_deb_leave(LBS_DEB_WEXT);
-
- return 0;
-}
-
-static int lbs_get_power(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- vwrq->value = 0;
- vwrq->flags = 0;
- vwrq->disabled = priv->psmode == LBS802_11POWERMODECAM
- || priv->connect_status == LBS_DISCONNECTED;
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
-{
- enum {
- POOR = 30,
- FAIR = 60,
- GOOD = 80,
- VERY_GOOD = 90,
- EXCELLENT = 95,
- PERFECT = 100
- };
- struct lbs_private *priv = dev->ml_priv;
- u32 rssi_qual;
- u32 tx_qual;
- u32 quality = 0;
- int ret, stats_valid = 0;
- u8 rssi;
- u32 tx_retries;
- struct cmd_ds_802_11_get_log log;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- priv->wstats.status = priv->mode;
-
- /* If we're not associated, all quality values are meaningless */
- if ((priv->connect_status != LBS_CONNECTED) &&
- !lbs_mesh_connected(priv))
- goto out;
-
- /* Quality by RSSI */
- priv->wstats.qual.level =
- CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
- priv->NF[TYPE_BEACON][TYPE_NOAVG]);
-
- if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) {
- priv->wstats.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE;
- } else {
- priv->wstats.qual.noise =
- CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]);
- }
-
- lbs_deb_wext("signal level %#x\n", priv->wstats.qual.level);
- lbs_deb_wext("noise %#x\n", priv->wstats.qual.noise);
-
- rssi = priv->wstats.qual.level - priv->wstats.qual.noise;
- if (rssi < 15)
- rssi_qual = rssi * POOR / 10;
- else if (rssi < 20)
- rssi_qual = (rssi - 15) * (FAIR - POOR) / 5 + POOR;
- else if (rssi < 30)
- rssi_qual = (rssi - 20) * (GOOD - FAIR) / 5 + FAIR;
- else if (rssi < 40)
- rssi_qual = (rssi - 30) * (VERY_GOOD - GOOD) /
- 10 + GOOD;
- else
- rssi_qual = (rssi - 40) * (PERFECT - VERY_GOOD) /
- 10 + VERY_GOOD;
- quality = rssi_qual;
-
- /* Quality by TX errors */
- priv->wstats.discard.retries = dev->stats.tx_errors;
-
- memset(&log, 0, sizeof(log));
- log.hdr.size = cpu_to_le16(sizeof(log));
- ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
- if (ret)
- goto out;
-
- tx_retries = le32_to_cpu(log.retry);
-
- if (tx_retries > 75)
- tx_qual = (90 - tx_retries) * POOR / 15;
- else if (tx_retries > 70)
- tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR;
- else if (tx_retries > 65)
- tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR;
- else if (tx_retries > 50)
- tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) /
- 15 + GOOD;
- else
- tx_qual = (50 - tx_retries) *
- (PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
- quality = min(quality, tx_qual);
-
- priv->wstats.discard.code = le32_to_cpu(log.wepundecryptable);
- priv->wstats.discard.retries = tx_retries;
- priv->wstats.discard.misc = le32_to_cpu(log.ackfailure);
-
- /* Calculate quality */
- priv->wstats.qual.qual = min_t(u8, quality, 100);
- priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- stats_valid = 1;
-
- /* update stats asynchronously for future calls */
- ret = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
- 0, 0, NULL);
- if (ret)
- lbs_pr_err("RSSI command failed\n");
-out:
- if (!stats_valid) {
- priv->wstats.miss.beacon = 0;
- priv->wstats.discard.retries = 0;
- priv->wstats.qual.qual = 0;
- priv->wstats.qual.level = 0;
- priv->wstats.qual.noise = 0;
- priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED;
- priv->wstats.qual.updated |= IW_QUAL_NOISE_INVALID |
- IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
- }
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return &priv->wstats;
-
-
-}
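
The RSSI-to-quality mapping in lbs_get_wireless_stats() is piecewise linear
between the named thresholds. A standalone restatement of just that mapping,
with one sample point worked out:

#include <stdio.h>

enum { POOR = 30, FAIR = 60, GOOD = 80, VERY_GOOD = 90, PERFECT = 100 };

static unsigned rssi_to_qual(unsigned rssi)	/* rssi = level - noise, in dB */
{
	if (rssi < 15)
		return rssi * POOR / 10;
	if (rssi < 20)
		return (rssi - 15) * (FAIR - POOR) / 5 + POOR;
	if (rssi < 30)
		return (rssi - 20) * (GOOD - FAIR) / 5 + FAIR;
	if (rssi < 40)
		return (rssi - 30) * (VERY_GOOD - GOOD) / 10 + GOOD;
	return (rssi - 40) * (PERFECT - VERY_GOOD) / 10 + VERY_GOOD;
}

int main(void)
{
	printf("%u\n", rssi_to_qual(25));	/* 25 dB of SNR maps to quality 80 */
	return 0;
}
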
-
-static int lbs_set_freq(struct net_device *dev, struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
-{
- int ret = -EINVAL;
- struct lbs_private *priv = dev->ml_priv;
- struct chan_freq_power *cfp;
- struct assoc_request * assoc_req;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- mutex_lock(&priv->lock);
- assoc_req = lbs_get_association_request(priv);
- if (!assoc_req) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* If setting by frequency, convert to a channel */
- if (fwrq->e == 1) {
- long f = fwrq->m / 100000;
-
- cfp = find_cfp_by_band_and_freq(priv, 0, f);
- if (!cfp) {
- lbs_deb_wext("invalid freq %ld\n", f);
- goto out;
- }
-
- fwrq->e = 0;
- fwrq->m = (int) cfp->channel;
- }
-
- /* Setting by channel number */
- if (fwrq->m > 1000 || fwrq->e > 0) {
- goto out;
- }
-
- cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m);
- if (!cfp) {
- goto out;
- }
-
- assoc_req->channel = fwrq->m;
- ret = 0;
-
-out:
- if (ret == 0) {
- set_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags);
- lbs_postpone_association_work(priv);
- } else {
- lbs_cancel_association_work(priv);
- }
- mutex_unlock(&priv->lock);
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-#ifdef CONFIG_LIBERTAS_MESH
-static int lbs_mesh_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- struct chan_freq_power *cfp;
- int ret = -EINVAL;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- /* If setting by frequency, convert to a channel */
- if (fwrq->e == 1) {
- long f = fwrq->m / 100000;
-
- cfp = find_cfp_by_band_and_freq(priv, 0, f);
- if (!cfp) {
- lbs_deb_wext("invalid freq %ld\n", f);
- goto out;
- }
-
- fwrq->e = 0;
- fwrq->m = (int) cfp->channel;
- }
-
- /* Setting by channel number */
- if (fwrq->m > 1000 || fwrq->e > 0) {
- goto out;
- }
-
- cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m);
- if (!cfp) {
- goto out;
- }
-
- if (fwrq->m != priv->channel) {
- lbs_deb_wext("mesh channel change forces eth disconnect\n");
- if (priv->mode == IW_MODE_INFRA)
- lbs_cmd_80211_deauthenticate(priv,
- priv->curbssparams.bssid,
- WLAN_REASON_DEAUTH_LEAVING);
- else if (priv->mode == IW_MODE_ADHOC)
- lbs_adhoc_stop(priv);
- }
- lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m);
- lbs_update_channel(priv);
- ret = 0;
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-#endif
-
-static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- u8 new_rate = 0;
- int ret = -EINVAL;
- u8 rates[MAX_RATES + 1];
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- lbs_deb_wext("vwrq->value %d\n", vwrq->value);
- lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed);
-
- if (vwrq->fixed && vwrq->value == -1)
- goto out;
-
- /* Auto rate? */
- priv->enablehwauto = !vwrq->fixed;
-
- if (vwrq->value == -1)
- priv->cur_rate = 0;
- else {
- if (vwrq->value % 100000)
- goto out;
-
- new_rate = vwrq->value / 500000;
- priv->cur_rate = new_rate;
- /* the rest is only needed for lbs_set_data_rate() */
- memset(rates, 0, sizeof(rates));
- copy_active_data_rates(priv, rates);
- if (!memchr(rates, new_rate, sizeof(rates))) {
- lbs_pr_alert("fixed data rate 0x%X out of range\n",
- new_rate);
- goto out;
- }
- if (priv->fwrelease < 0x09000000) {
- ret = lbs_set_power_adapt_cfg(priv, 0,
- POW_ADAPT_DEFAULT_P0,
- POW_ADAPT_DEFAULT_P1,
- POW_ADAPT_DEFAULT_P2);
- if (ret)
- goto out;
- }
- ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
- TPC_DEFAULT_P2, 1);
- if (ret)
- goto out;
- }
-
- /* Try the newer command first (Firmware Spec 5.1 and above) */
- ret = lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_SET);
-
- /* Fallback to older version */
- if (ret)
- ret = lbs_set_data_rate(priv, new_rate);
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (priv->connect_status == LBS_CONNECTED) {
- vwrq->value = priv->cur_rate * 500000;
-
- if (priv->enablehwauto)
- vwrq->fixed = 0;
- else
- vwrq->fixed = 1;
-
- } else {
- vwrq->fixed = 0;
- vwrq->value = 0;
- }
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-static int lbs_set_mode(struct net_device *dev,
- struct iw_request_info *info, u32 * uwrq, char *extra)
-{
- int ret = 0;
- struct lbs_private *priv = dev->ml_priv;
- struct assoc_request * assoc_req;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if ( (*uwrq != IW_MODE_ADHOC)
- && (*uwrq != IW_MODE_INFRA)
- && (*uwrq != IW_MODE_AUTO)) {
- lbs_deb_wext("Invalid mode: 0x%x\n", *uwrq);
- ret = -EINVAL;
- goto out;
- }
-
- mutex_lock(&priv->lock);
- assoc_req = lbs_get_association_request(priv);
- if (!assoc_req) {
- ret = -ENOMEM;
- lbs_cancel_association_work(priv);
- } else {
- assoc_req->mode = *uwrq;
- set_bit(ASSOC_FLAG_MODE, &assoc_req->flags);
- lbs_postpone_association_work(priv);
- lbs_deb_wext("Switching to mode: 0x%x\n", *uwrq);
- }
- mutex_unlock(&priv->lock);
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-
-/**
- * @brief Get Encryption key
- *
- * @param dev A pointer to net_device structure
- * @param info A pointer to iw_request_info structure
- * @param dwrq A pointer to iw_point structure
- * @param extra A pointer to extra data buf
- * @return 0 on success, otherwise fail
- */
-static int lbs_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, u8 * extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- lbs_deb_wext("flags 0x%x, index %d, length %d, wep_tx_keyidx %d\n",
- dwrq->flags, index, dwrq->length, priv->wep_tx_keyidx);
-
- dwrq->flags = 0;
-
- /* Authentication method */
- switch (priv->secinfo.auth_mode) {
- case IW_AUTH_ALG_OPEN_SYSTEM:
- dwrq->flags = IW_ENCODE_OPEN;
- break;
-
- case IW_AUTH_ALG_SHARED_KEY:
- case IW_AUTH_ALG_LEAP:
- dwrq->flags = IW_ENCODE_RESTRICTED;
- break;
- default:
- dwrq->flags = IW_ENCODE_DISABLED | IW_ENCODE_OPEN;
- break;
- }
-
- memset(extra, 0, 16);
-
- mutex_lock(&priv->lock);
-
- /* Default to returning current transmit key */
- if (index < 0)
- index = priv->wep_tx_keyidx;
-
- if ((priv->wep_keys[index].len) && priv->secinfo.wep_enabled) {
- memcpy(extra, priv->wep_keys[index].key,
- priv->wep_keys[index].len);
- dwrq->length = priv->wep_keys[index].len;
-
- dwrq->flags |= (index + 1);
- /* Return WEP enabled */
- dwrq->flags &= ~IW_ENCODE_DISABLED;
- } else if ((priv->secinfo.WPAenabled)
- || (priv->secinfo.WPA2enabled)) {
- /* return WPA enabled */
- dwrq->flags &= ~IW_ENCODE_DISABLED;
- dwrq->flags |= IW_ENCODE_NOKEY;
- } else {
- dwrq->flags |= IW_ENCODE_DISABLED;
- }
-
- mutex_unlock(&priv->lock);
-
- lbs_deb_wext("key: %02x:%02x:%02x:%02x:%02x:%02x, keylen %d\n",
- extra[0], extra[1], extra[2],
- extra[3], extra[4], extra[5], dwrq->length);
-
- lbs_deb_wext("return flags 0x%x\n", dwrq->flags);
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-/**
- * @brief Set Encryption key (internal)
- *
- * @param assoc_req A pointer to the association request structure
- * @param key_material A pointer to key material
- * @param key_length length of key material
- * @param index key index to set
- * @param set_tx_key Force set TX key (1 = yes, 0 = no)
- * @return 0 on success, otherwise fail
- */
-static int lbs_set_wep_key(struct assoc_request *assoc_req,
- const char *key_material,
- u16 key_length,
- u16 index,
- int set_tx_key)
-{
- int ret = 0;
- struct enc_key *pkey;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- /* Paranoid validation of key index */
- if (index > 3) {
- ret = -EINVAL;
- goto out;
- }
-
- /* validate max key length */
- if (key_length > KEY_LEN_WEP_104) {
- ret = -EINVAL;
- goto out;
- }
-
- pkey = &assoc_req->wep_keys[index];
-
- if (key_length > 0) {
- memset(pkey, 0, sizeof(struct enc_key));
- pkey->type = KEY_TYPE_ID_WEP;
-
- /* Standardize the key length */
- pkey->len = (key_length > KEY_LEN_WEP_40) ?
- KEY_LEN_WEP_104 : KEY_LEN_WEP_40;
- memcpy(pkey->key, key_material, key_length);
- }
-
- if (set_tx_key) {
- /* Ensure the chosen key is valid */
- if (!pkey->len) {
- lbs_deb_wext("key not set, so cannot enable it\n");
- ret = -EINVAL;
- goto out;
- }
- assoc_req->wep_tx_keyidx = index;
- }
-
- assoc_req->secinfo.wep_enabled = 1;
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
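
The "standardize the key length" step above rounds any nonzero WEP key up to
one of the two legal sizes. A small sketch, assuming the usual 5- and 13-byte
values of KEY_LEN_WEP_40 and KEY_LEN_WEP_104:

#include <stdio.h>

#define KEY_LEN_WEP_40   5
#define KEY_LEN_WEP_104 13

static unsigned wep_standard_len(unsigned key_length)
{
	return key_length > KEY_LEN_WEP_40 ? KEY_LEN_WEP_104 : KEY_LEN_WEP_40;
}

int main(void)
{
	printf("%u %u %u\n", wep_standard_len(5),	/* 5 bytes  -> WEP-40  */
			     wep_standard_len(13),	/* 13 bytes -> WEP-104 */
			     wep_standard_len(10));	/* 10 bytes -> padded out to 13 */
	return 0;
}
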
-
-static int validate_key_index(u16 def_index, u16 raw_index,
- u16 *out_index, u16 *is_default)
-{
- if (!out_index || !is_default)
- return -EINVAL;
-
- /* Verify index if present, otherwise use default TX key index */
- if (raw_index > 0) {
- if (raw_index > 4)
- return -EINVAL;
- *out_index = raw_index - 1;
- } else {
- *out_index = def_index;
- *is_default = 1;
- }
- return 0;
-}
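
validate_key_index() maps the 1-based, user-visible WEP index (1..4, where 0
means "use the current default TX key") onto the driver's 0-based key slots.
A standalone restatement with illustrative names:

#include <stdio.h>

static int map_key_index(unsigned def_index, unsigned raw_index,
			 unsigned *out_index, unsigned *is_default)
{
	if (raw_index > 0) {
		if (raw_index > 4)
			return -1;		/* -EINVAL in the driver */
		*out_index = raw_index - 1;	/* index 1..4 -> slot 0..3 */
		*is_default = 0;
	} else {
		*out_index = def_index;		/* fall back to the default TX slot */
		*is_default = 1;
	}
	return 0;
}

int main(void)
{
	unsigned idx, def;

	map_key_index(0, 3, &idx, &def);	/* user index 3 -> slot 2 */
	printf("slot %u, default %u\n", idx, def);

	map_key_index(1, 0, &idx, &def);	/* no index -> default slot 1 */
	printf("slot %u, default %u\n", idx, def);
	return 0;
}
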
-
-static void disable_wep(struct assoc_request *assoc_req)
-{
- int i;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- /* Set Open System auth mode */
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
-
- /* Clear WEP keys and mark WEP as disabled */
- assoc_req->secinfo.wep_enabled = 0;
- for (i = 0; i < 4; i++)
- assoc_req->wep_keys[i].len = 0;
-
- set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
- set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
-
- lbs_deb_leave(LBS_DEB_WEXT);
-}
-
-static void disable_wpa(struct assoc_request *assoc_req)
-{
- lbs_deb_enter(LBS_DEB_WEXT);
-
- memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct enc_key));
- assoc_req->wpa_mcast_key.flags = KEY_INFO_WPA_MCAST;
- set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
-
- memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct enc_key));
- assoc_req->wpa_unicast_key.flags = KEY_INFO_WPA_UNICAST;
- set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
-
- assoc_req->secinfo.WPAenabled = 0;
- assoc_req->secinfo.WPA2enabled = 0;
- set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
-
- lbs_deb_leave(LBS_DEB_WEXT);
-}
-
-/**
- * @brief Set Encryption key
- *
- * @param dev A pointer to net_device structure
- * @param info A pointer to iw_request_info structure
- * @param dwrq A pointer to iw_point structure
- * @param extra A pointer to extra data buf
- * @return 0 on success, otherwise fail
- */
-static int lbs_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- int ret = 0;
- struct lbs_private *priv = dev->ml_priv;
- struct assoc_request * assoc_req;
- u16 is_default = 0, index = 0, set_tx_key = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- mutex_lock(&priv->lock);
- assoc_req = lbs_get_association_request(priv);
- if (!assoc_req) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (dwrq->flags & IW_ENCODE_DISABLED) {
- disable_wep (assoc_req);
- disable_wpa (assoc_req);
- goto out;
- }
-
- ret = validate_key_index(assoc_req->wep_tx_keyidx,
- (dwrq->flags & IW_ENCODE_INDEX),
- &index, &is_default);
- if (ret) {
- ret = -EINVAL;
- goto out;
- }
-
- /* If WEP isn't enabled, or if there is no key data but a valid
- * index, set the TX key.
- */
- if (!assoc_req->secinfo.wep_enabled || (dwrq->length == 0 && !is_default))
- set_tx_key = 1;
-
- ret = lbs_set_wep_key(assoc_req, extra, dwrq->length, index, set_tx_key);
- if (ret)
- goto out;
-
- if (dwrq->length)
- set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
- if (set_tx_key)
- set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
-
- if (dwrq->flags & IW_ENCODE_RESTRICTED) {
- priv->authtype_auto = 0;
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
- } else if (dwrq->flags & IW_ENCODE_OPEN) {
- priv->authtype_auto = 0;
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
- }
-
-out:
- if (ret == 0) {
- set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
- lbs_postpone_association_work(priv);
- } else {
- lbs_cancel_association_work(priv);
- }
- mutex_unlock(&priv->lock);
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief Get Extended Encryption key (WPA/802.1x and WEP)
- *
- * @param dev A pointer to net_device structure
- * @param info A pointer to iw_request_info structure
- * @param dwrq A pointer to iw_point structure
- * @param extra A pointer to extra data buf
- * @return 0 on success, otherwise failure
- */
-static int lbs_get_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- int ret = -EINVAL;
- struct lbs_private *priv = dev->ml_priv;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int index, max_key_len;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- max_key_len = dwrq->length - sizeof(*ext);
- if (max_key_len < 0)
- goto out;
-
- index = dwrq->flags & IW_ENCODE_INDEX;
- if (index) {
- if (index < 1 || index > 4)
- goto out;
- index--;
- } else {
- index = priv->wep_tx_keyidx;
- }
-
- if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
- ext->alg != IW_ENCODE_ALG_WEP) {
- if (index != 0 || priv->mode != IW_MODE_INFRA)
- goto out;
- }
-
- dwrq->flags = index + 1;
- memset(ext, 0, sizeof(*ext));
-
- if ( !priv->secinfo.wep_enabled
- && !priv->secinfo.WPAenabled
- && !priv->secinfo.WPA2enabled) {
- ext->alg = IW_ENCODE_ALG_NONE;
- ext->key_len = 0;
- dwrq->flags |= IW_ENCODE_DISABLED;
- } else {
- u8 *key = NULL;
-
- if ( priv->secinfo.wep_enabled
- && !priv->secinfo.WPAenabled
- && !priv->secinfo.WPA2enabled) {
- /* WEP */
- ext->alg = IW_ENCODE_ALG_WEP;
- ext->key_len = priv->wep_keys[index].len;
- key = &priv->wep_keys[index].key[0];
- } else if ( !priv->secinfo.wep_enabled
- && (priv->secinfo.WPAenabled ||
- priv->secinfo.WPA2enabled)) {
- /* WPA */
- struct enc_key * pkey = NULL;
-
- if ( priv->wpa_mcast_key.len
- && (priv->wpa_mcast_key.flags & KEY_INFO_WPA_ENABLED))
- pkey = &priv->wpa_mcast_key;
- else if ( priv->wpa_unicast_key.len
- && (priv->wpa_unicast_key.flags & KEY_INFO_WPA_ENABLED))
- pkey = &priv->wpa_unicast_key;
-
- if (pkey) {
- if (pkey->type == KEY_TYPE_ID_AES) {
- ext->alg = IW_ENCODE_ALG_CCMP;
- } else {
- ext->alg = IW_ENCODE_ALG_TKIP;
- }
- ext->key_len = pkey->len;
- key = &pkey->key[0];
- } else {
- ext->alg = IW_ENCODE_ALG_TKIP;
- ext->key_len = 0;
- }
- } else {
- goto out;
- }
-
- if (ext->key_len > max_key_len) {
- ret = -E2BIG;
- goto out;
- }
-
- if (ext->key_len)
- memcpy(ext->key, key, ext->key_len);
- else
- dwrq->flags |= IW_ENCODE_NOKEY;
- dwrq->flags |= IW_ENCODE_ENABLED;
- }
- ret = 0;
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-/**
- * @brief Set Encryption key Extended (WPA/802.1x and WEP)
- *
- * @param dev A pointer to net_device structure
- * @param info A pointer to iw_request_info structure
- * @param dwrq A pointer to iw_point structure
- * @param extra A pointer to extra data buf
- * @return 0 on success, otherwise fail
- */
-static int lbs_set_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- int ret = 0;
- struct lbs_private *priv = dev->ml_priv;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int alg = ext->alg;
- struct assoc_request * assoc_req;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- mutex_lock(&priv->lock);
- assoc_req = lbs_get_association_request(priv);
- if (!assoc_req) {
- ret = -ENOMEM;
- goto out;
- }
-
- if ((alg == IW_ENCODE_ALG_NONE) || (dwrq->flags & IW_ENCODE_DISABLED)) {
- disable_wep (assoc_req);
- disable_wpa (assoc_req);
- } else if (alg == IW_ENCODE_ALG_WEP) {
- u16 is_default = 0, index, set_tx_key = 0;
-
- ret = validate_key_index(assoc_req->wep_tx_keyidx,
- (dwrq->flags & IW_ENCODE_INDEX),
- &index, &is_default);
- if (ret)
- goto out;
-
- /* If WEP isn't enabled, or if there is no key data but a valid
- * index, or if the set-TX-key flag was passed, set the TX key.
- */
- if ( !assoc_req->secinfo.wep_enabled
- || (dwrq->length == 0 && !is_default)
- || (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY))
- set_tx_key = 1;
-
- /* Copy key to driver */
- ret = lbs_set_wep_key(assoc_req, ext->key, ext->key_len, index,
- set_tx_key);
- if (ret)
- goto out;
-
- if (dwrq->flags & IW_ENCODE_RESTRICTED) {
- priv->authtype_auto = 0;
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
- } else if (dwrq->flags & IW_ENCODE_OPEN) {
- priv->authtype_auto = 0;
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
- }
-
- /* Mark the various WEP bits as modified */
- set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
- if (dwrq->length)
- set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
- if (set_tx_key)
- set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
- } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) {
- struct enc_key * pkey;
-
- /* validate key length */
- if (((alg == IW_ENCODE_ALG_TKIP)
- && (ext->key_len != KEY_LEN_WPA_TKIP))
- || ((alg == IW_ENCODE_ALG_CCMP)
- && (ext->key_len != KEY_LEN_WPA_AES))) {
- lbs_deb_wext("invalid size %d for key of alg "
- "type %d\n",
- ext->key_len,
- alg);
- ret = -EINVAL;
- goto out;
- }
-
- if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- pkey = &assoc_req->wpa_mcast_key;
- set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
- } else {
- pkey = &assoc_req->wpa_unicast_key;
- set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
- }
-
- memset(pkey, 0, sizeof (struct enc_key));
- memcpy(pkey->key, ext->key, ext->key_len);
- pkey->len = ext->key_len;
- if (pkey->len)
- pkey->flags |= KEY_INFO_WPA_ENABLED;
-
- /* Do this after zeroing key structure */
- if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- pkey->flags |= KEY_INFO_WPA_MCAST;
- } else {
- pkey->flags |= KEY_INFO_WPA_UNICAST;
- }
-
- if (alg == IW_ENCODE_ALG_TKIP) {
- pkey->type = KEY_TYPE_ID_TKIP;
- } else if (alg == IW_ENCODE_ALG_CCMP) {
- pkey->type = KEY_TYPE_ID_AES;
- }
-
- /* If WPA isn't enabled yet, do that now */
- if ( assoc_req->secinfo.WPAenabled == 0
- && assoc_req->secinfo.WPA2enabled == 0) {
- assoc_req->secinfo.WPAenabled = 1;
- assoc_req->secinfo.WPA2enabled = 1;
- set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
- }
-
- /* Only disable wep if necessary: can't waste time here. */
- if (priv->mac_control & CMD_ACT_MAC_WEP_ENABLE)
- disable_wep(assoc_req);
- }
-
-out:
- if (ret == 0) {
- /* 802.1x and WPA rekeying must happen as quickly as possible,
- * especially during the 4-way handshake; thus if in
- * infrastructure mode, and either (a) 802.1x is enabled or
- * (b) WPA is being used, set the key right away.
- */
- if (assoc_req->mode == IW_MODE_INFRA &&
- ((assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_802_1X) ||
- (assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_PSK) ||
- assoc_req->secinfo.WPAenabled ||
- assoc_req->secinfo.WPA2enabled)) {
- lbs_do_association_work(priv);
- } else
- lbs_postpone_association_work(priv);
- } else {
- lbs_cancel_association_work(priv);
- }
- mutex_unlock(&priv->lock);
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-
-static int lbs_set_genie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
- struct assoc_request * assoc_req;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- mutex_lock(&priv->lock);
- assoc_req = lbs_get_association_request(priv);
- if (!assoc_req) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (dwrq->length > MAX_WPA_IE_LEN ||
- (dwrq->length && extra == NULL)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (dwrq->length) {
- memcpy(&assoc_req->wpa_ie[0], extra, dwrq->length);
- assoc_req->wpa_ie_len = dwrq->length;
- } else {
- memset(&assoc_req->wpa_ie[0], 0, sizeof(priv->wpa_ie));
- assoc_req->wpa_ie_len = 0;
- }
-
-out:
- if (ret == 0) {
- set_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags);
- lbs_postpone_association_work(priv);
- } else {
- lbs_cancel_association_work(priv);
- }
- mutex_unlock(&priv->lock);
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_get_genie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- int ret = 0;
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (priv->wpa_ie_len == 0) {
- dwrq->length = 0;
- goto out;
- }
-
- if (dwrq->length < priv->wpa_ie_len) {
- ret = -E2BIG;
- goto out;
- }
-
- dwrq->length = priv->wpa_ie_len;
- memcpy(extra, &priv->wpa_ie[0], priv->wpa_ie_len);
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-
-static int lbs_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *dwrq,
- char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- struct assoc_request * assoc_req;
- int ret = 0;
- int updated = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- mutex_lock(&priv->lock);
- assoc_req = lbs_get_association_request(priv);
- if (!assoc_req) {
- ret = -ENOMEM;
- goto out;
- }
-
- switch (dwrq->flags & IW_AUTH_INDEX) {
- case IW_AUTH_PRIVACY_INVOKED:
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- case IW_AUTH_TKIP_COUNTERMEASURES:
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_DROP_UNENCRYPTED:
- /*
- * libertas does not use these parameters
- */
- break;
-
- case IW_AUTH_KEY_MGMT:
- assoc_req->secinfo.key_mgmt = dwrq->value;
- updated = 1;
- break;
-
- case IW_AUTH_WPA_VERSION:
- if (dwrq->value & IW_AUTH_WPA_VERSION_DISABLED) {
- assoc_req->secinfo.WPAenabled = 0;
- assoc_req->secinfo.WPA2enabled = 0;
- disable_wpa (assoc_req);
- }
- if (dwrq->value & IW_AUTH_WPA_VERSION_WPA) {
- assoc_req->secinfo.WPAenabled = 1;
- assoc_req->secinfo.wep_enabled = 0;
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
- }
- if (dwrq->value & IW_AUTH_WPA_VERSION_WPA2) {
- assoc_req->secinfo.WPA2enabled = 1;
- assoc_req->secinfo.wep_enabled = 0;
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
- }
- updated = 1;
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- if (dwrq->value & IW_AUTH_ALG_SHARED_KEY) {
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
- } else if (dwrq->value & IW_AUTH_ALG_OPEN_SYSTEM) {
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
- } else if (dwrq->value & IW_AUTH_ALG_LEAP) {
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_LEAP;
- } else {
- ret = -EINVAL;
- }
- updated = 1;
- break;
-
- case IW_AUTH_WPA_ENABLED:
- if (dwrq->value) {
- if (!assoc_req->secinfo.WPAenabled &&
- !assoc_req->secinfo.WPA2enabled) {
- assoc_req->secinfo.WPAenabled = 1;
- assoc_req->secinfo.WPA2enabled = 1;
- assoc_req->secinfo.wep_enabled = 0;
- assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
- }
- } else {
- assoc_req->secinfo.WPAenabled = 0;
- assoc_req->secinfo.WPA2enabled = 0;
- disable_wpa (assoc_req);
- }
- updated = 1;
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
-out:
- if (ret == 0) {
- if (updated)
- set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
- lbs_postpone_association_work(priv);
- } else if (ret != -EOPNOTSUPP) {
- lbs_cancel_association_work(priv);
- }
- mutex_unlock(&priv->lock);
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_get_auth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *dwrq,
- char *extra)
-{
- int ret = 0;
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- switch (dwrq->flags & IW_AUTH_INDEX) {
- case IW_AUTH_KEY_MGMT:
- dwrq->value = priv->secinfo.key_mgmt;
- break;
-
- case IW_AUTH_WPA_VERSION:
- dwrq->value = 0;
- if (priv->secinfo.WPAenabled)
- dwrq->value |= IW_AUTH_WPA_VERSION_WPA;
- if (priv->secinfo.WPA2enabled)
- dwrq->value |= IW_AUTH_WPA_VERSION_WPA2;
- if (!dwrq->value)
- dwrq->value |= IW_AUTH_WPA_VERSION_DISABLED;
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- dwrq->value = priv->secinfo.auth_mode;
- break;
-
- case IW_AUTH_WPA_ENABLED:
- if (priv->secinfo.WPAenabled && priv->secinfo.WPA2enabled)
- dwrq->value = 1;
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-
-static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int ret = 0;
- struct lbs_private *priv = dev->ml_priv;
- s16 dbm = (s16) vwrq->value;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (vwrq->disabled) {
- lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 0);
- goto out;
- }
-
- if (vwrq->fixed == 0) {
- /* User requests automatic tx power control, however there are
- * many auto tx settings. For now use firmware defaults until
- * we come up with a good way to expose these to the user. */
- if (priv->fwrelease < 0x09000000) {
- ret = lbs_set_power_adapt_cfg(priv, 1,
- POW_ADAPT_DEFAULT_P0,
- POW_ADAPT_DEFAULT_P1,
- POW_ADAPT_DEFAULT_P2);
- if (ret)
- goto out;
- }
- ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
- TPC_DEFAULT_P2, 1);
- if (ret)
- goto out;
- dbm = priv->txpower_max;
- } else {
- /* Userspace check in iwrange if it should use dBm or mW,
- * therefore this should never happen... Jean II */
- if ((vwrq->flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- /* Validate requested power level against firmware allowed
- * levels */
- if (priv->txpower_min && (dbm < priv->txpower_min)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (priv->txpower_max && (dbm > priv->txpower_max)) {
- ret = -EINVAL;
- goto out;
- }
- if (priv->fwrelease < 0x09000000) {
- ret = lbs_set_power_adapt_cfg(priv, 0,
- POW_ADAPT_DEFAULT_P0,
- POW_ADAPT_DEFAULT_P1,
- POW_ADAPT_DEFAULT_P2);
- if (ret)
- goto out;
- }
- ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
- TPC_DEFAULT_P2, 1);
- if (ret)
- goto out;
- }
-
- /* If the radio was off, turn it on */
- if (!priv->radio_on) {
- ret = lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1);
- if (ret)
- goto out;
- }
-
- lbs_deb_wext("txpower set %d dBm\n", dbm);
-
- ret = lbs_set_tx_power(priv, dbm);
-
-out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- /*
- * Note : if dwrq->flags != 0, we should get the relevant SSID from
- * the SSID list...
- */
-
- /*
- * Get the current SSID
- */
- if (priv->connect_status == LBS_CONNECTED) {
- memcpy(extra, priv->curbssparams.ssid,
- priv->curbssparams.ssid_len);
- } else {
- memset(extra, 0, 32);
- }
- /*
- * If none, we may want to get the one that was set
- */
-
- dwrq->length = priv->curbssparams.ssid_len;
-
- dwrq->flags = 1; /* active */
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len = 0;
- struct assoc_request * assoc_req;
- int in_ssid_len = dwrq->length;
- DECLARE_SSID_BUF(ssid_buf);
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (!priv->radio_on) {
- ret = -EINVAL;
- goto out;
- }
-
- /* Check the size of the string */
- if (in_ssid_len > IEEE80211_MAX_SSID_LEN) {
- ret = -E2BIG;
- goto out;
- }
-
- memset(&ssid, 0, sizeof(ssid));
-
- if (!dwrq->flags || !in_ssid_len) {
- /* "any" SSID requested; leave SSID blank */
- } else {
- /* Specific SSID requested */
- memcpy(&ssid, extra, in_ssid_len);
- ssid_len = in_ssid_len;
- }
-
- if (!ssid_len) {
- lbs_deb_wext("requested any SSID\n");
- } else {
- lbs_deb_wext("requested SSID '%s'\n",
- print_ssid(ssid_buf, ssid, ssid_len));
- }
-
-out:
- mutex_lock(&priv->lock);
- if (ret == 0) {
- /* Get or create the current association request */
- assoc_req = lbs_get_association_request(priv);
- if (!assoc_req) {
- ret = -ENOMEM;
- } else {
- /* Copy the SSID to the association request */
- memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN);
- assoc_req->ssid_len = ssid_len;
- set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
- lbs_postpone_association_work(priv);
- }
- }
-
- /* Cancel the association request if there was an error */
- if (ret != 0) {
- lbs_cancel_association_work(priv);
- }
-
- mutex_unlock(&priv->lock);
-
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-
-#ifdef CONFIG_LIBERTAS_MESH
-static int lbs_mesh_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- memcpy(extra, priv->mesh_ssid, priv->mesh_ssid_len);
-
- dwrq->length = priv->mesh_ssid_len;
-
- dwrq->flags = 1; /* active */
-
- lbs_deb_leave(LBS_DEB_WEXT);
- return 0;
-}
-
-static int lbs_mesh_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (!priv->radio_on) {
- ret = -EINVAL;
- goto out;
- }
-
- /* Check the size of the string */
- if (dwrq->length > IEEE80211_MAX_SSID_LEN) {
- ret = -E2BIG;
- goto out;
- }
-
- if (!dwrq->flags || !dwrq->length) {
- ret = -EINVAL;
- goto out;
- } else {
- /* Specific SSID requested */
- memcpy(priv->mesh_ssid, extra, dwrq->length);
- priv->mesh_ssid_len = dwrq->length;
- }
-
- lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
- priv->channel);
- out:
- lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
- return ret;
-}
-#endif
-
-/**
- * @brief Connect to the AP or Ad-hoc Network with specific bssid
- *
- * @param dev A pointer to net_device structure
- * @param info A pointer to iw_request_info structure
- * @param awrq A pointer to iw_param structure
- * @param extra A pointer to extra data buf
- * @return 0 --success, otherwise fail
- */
-static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
- struct sockaddr *awrq, char *extra)
-{
- struct lbs_private *priv = dev->ml_priv;
- struct assoc_request * assoc_req;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_WEXT);
-
- if (!priv->radio_on)
- return -EINVAL;
-
- if (awrq->sa_family != ARPHRD_ETHER)
- return -EINVAL;
-
- lbs_deb_wext("ASSOC: WAP: sa_data %pM\n", awrq->sa_data);
-
- mutex_lock(&priv->lock);
-
- /* Get or create the current association request */
- assoc_req = lbs_get_association_request(priv);
- if (!assoc_req) {
- lbs_cancel_association_work(priv);
- ret = -ENOMEM;
- } else {
- /* Copy the BSSID to the association request */
- memcpy(&assoc_req->bssid, awrq->sa_data, ETH_ALEN);
- set_bit(ASSOC_FLAG_BSSID, &assoc_req->flags);
- lbs_postpone_association_work(priv);
- }
-
- mutex_unlock(&priv->lock);
-
- return ret;
-}
-
-/*
- * iwconfig settable callbacks
- */
-static const iw_handler lbs_handler[] = {
- (iw_handler) NULL, /* SIOCSIWCOMMIT */
- (iw_handler) lbs_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) lbs_set_freq, /* SIOCSIWFREQ */
- (iw_handler) lbs_get_freq, /* SIOCGIWFREQ */
- (iw_handler) lbs_set_mode, /* SIOCSIWMODE */
- (iw_handler) lbs_get_mode, /* SIOCGIWMODE */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) lbs_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) NULL, /* SIOCGIWSTATS */
- iw_handler_set_spy, /* SIOCSIWSPY */
- iw_handler_get_spy, /* SIOCGIWSPY */
- iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
- iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
- (iw_handler) lbs_set_wap, /* SIOCSIWAP */
- (iw_handler) lbs_get_wap, /* SIOCGIWAP */
- (iw_handler) NULL, /* SIOCSIWMLME */
- (iw_handler) NULL, /* SIOCGIWAPLIST - deprecated */
- (iw_handler) lbs_set_scan, /* SIOCSIWSCAN */
- (iw_handler) lbs_get_scan, /* SIOCGIWSCAN */
- (iw_handler) lbs_set_essid, /* SIOCSIWESSID */
- (iw_handler) lbs_get_essid, /* SIOCGIWESSID */
- (iw_handler) lbs_set_nick, /* SIOCSIWNICKN */
- (iw_handler) lbs_get_nick, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) lbs_set_rate, /* SIOCSIWRATE */
- (iw_handler) lbs_get_rate, /* SIOCGIWRATE */
- (iw_handler) lbs_set_rts, /* SIOCSIWRTS */
- (iw_handler) lbs_get_rts, /* SIOCGIWRTS */
- (iw_handler) lbs_set_frag, /* SIOCSIWFRAG */
- (iw_handler) lbs_get_frag, /* SIOCGIWFRAG */
- (iw_handler) lbs_set_txpow, /* SIOCSIWTXPOW */
- (iw_handler) lbs_get_txpow, /* SIOCGIWTXPOW */
- (iw_handler) lbs_set_retry, /* SIOCSIWRETRY */
- (iw_handler) lbs_get_retry, /* SIOCGIWRETRY */
- (iw_handler) lbs_set_encode, /* SIOCSIWENCODE */
- (iw_handler) lbs_get_encode, /* SIOCGIWENCODE */
- (iw_handler) lbs_set_power, /* SIOCSIWPOWER */
- (iw_handler) lbs_get_power, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) lbs_set_genie, /* SIOCSIWGENIE */
- (iw_handler) lbs_get_genie, /* SIOCGIWGENIE */
- (iw_handler) lbs_set_auth, /* SIOCSIWAUTH */
- (iw_handler) lbs_get_auth, /* SIOCGIWAUTH */
- (iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */
- (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
-};
-struct iw_handler_def lbs_handler_def = {
- .num_standard = ARRAY_SIZE(lbs_handler),
- .standard = (iw_handler *) lbs_handler,
- .get_wireless_stats = lbs_get_wireless_stats,
-};
-
-#ifdef CONFIG_LIBERTAS_MESH
-static const iw_handler mesh_wlan_handler[] = {
- (iw_handler) NULL, /* SIOCSIWCOMMIT */
- (iw_handler) lbs_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) lbs_mesh_set_freq, /* SIOCSIWFREQ */
- (iw_handler) lbs_get_freq, /* SIOCGIWFREQ */
- (iw_handler) NULL, /* SIOCSIWMODE */
- (iw_handler) mesh_wlan_get_mode, /* SIOCGIWMODE */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) lbs_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) NULL, /* SIOCGIWSTATS */
- iw_handler_set_spy, /* SIOCSIWSPY */
- iw_handler_get_spy, /* SIOCGIWSPY */
- iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
- iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
- (iw_handler) NULL, /* SIOCSIWAP */
- (iw_handler) NULL, /* SIOCGIWAP */
- (iw_handler) NULL, /* SIOCSIWMLME */
- (iw_handler) NULL, /* SIOCGIWAPLIST - deprecated */
- (iw_handler) lbs_set_scan, /* SIOCSIWSCAN */
- (iw_handler) lbs_get_scan, /* SIOCGIWSCAN */
- (iw_handler) lbs_mesh_set_essid,/* SIOCSIWESSID */
- (iw_handler) lbs_mesh_get_essid,/* SIOCGIWESSID */
- (iw_handler) NULL, /* SIOCSIWNICKN */
- (iw_handler) mesh_get_nick, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) lbs_set_rate, /* SIOCSIWRATE */
- (iw_handler) lbs_get_rate, /* SIOCGIWRATE */
- (iw_handler) lbs_set_rts, /* SIOCSIWRTS */
- (iw_handler) lbs_get_rts, /* SIOCGIWRTS */
- (iw_handler) lbs_set_frag, /* SIOCSIWFRAG */
- (iw_handler) lbs_get_frag, /* SIOCGIWFRAG */
- (iw_handler) lbs_set_txpow, /* SIOCSIWTXPOW */
- (iw_handler) lbs_get_txpow, /* SIOCGIWTXPOW */
- (iw_handler) lbs_set_retry, /* SIOCSIWRETRY */
- (iw_handler) lbs_get_retry, /* SIOCGIWRETRY */
- (iw_handler) lbs_set_encode, /* SIOCSIWENCODE */
- (iw_handler) lbs_get_encode, /* SIOCGIWENCODE */
- (iw_handler) lbs_set_power, /* SIOCSIWPOWER */
- (iw_handler) lbs_get_power, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) lbs_set_genie, /* SIOCSIWGENIE */
- (iw_handler) lbs_get_genie, /* SIOCGIWGENIE */
- (iw_handler) lbs_set_auth, /* SIOCSIWAUTH */
- (iw_handler) lbs_get_auth, /* SIOCGIWAUTH */
- (iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */
- (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
-};
-
-struct iw_handler_def mesh_handler_def = {
- .num_standard = ARRAY_SIZE(mesh_wlan_handler),
- .standard = (iw_handler *) mesh_wlan_handler,
- .get_wireless_stats = lbs_get_wireless_stats,
-};
-#endif
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
deleted file mode 100644
index f3f19fe..0000000
--- a/drivers/net/wireless/libertas/wext.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/**
- * This file contains definition for IOCTL call.
- */
-#ifndef _LBS_WEXT_H_
-#define _LBS_WEXT_H_
-
-void lbs_send_disconnect_notification(struct lbs_private *priv);
-void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
-
-struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
- struct lbs_private *priv,
- u8 band,
- u16 channel);
-
-extern struct iw_handler_def lbs_handler_def;
-
-#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 6a04c21..817fffc 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -549,7 +549,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
prxpd = (struct rxpd *) skb->data;
- stats.flag = 0;
+ memset(&stats, 0, sizeof(stats));
if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
stats.flag |= RX_FLAG_FAILED_FCS_CRC;
stats.freq = priv->cur_freq;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 49a7dfb..e7f299d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1291,6 +1291,11 @@ static int __init init_mac80211_hwsim(void)
hw->wiphy->n_addresses = 2;
hw->wiphy->addresses = data->addresses;
+ if (fake_hw_scan) {
+ hw->wiphy->max_scan_ssids = 255;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ }
+
hw->channel_change_time = 1;
hw->queues = 4;
hw->wiphy->interface_modes =
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 07c4528..a5ea89c 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -41,6 +41,8 @@ static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
{ PCI_DEVICE(0x1260, 0x3877) },
/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3886) },
+ /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
+ { PCI_DEVICE(0x1260, 0xffff) },
{ },
};
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b0318ea..ad59595 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -69,7 +69,8 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */
{USB_DEVICE(0x0baf, 0x0118)}, /* U.S. Robotics U5 802.11g Adapter*/
{USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/
- {USB_DEVICE(0x0cde, 0x0006)}, /* Medion MD40900 */
+ /* {USB_DEVICE(0x0cde, 0x0006)}, * Medion MD40900 already listed above,
+ * just noting it here for clarity */
{USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */
{USB_DEVICE(0x0cde, 0x0015)}, /* Zcomax XG-705A */
{USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 5e7f344..719573b 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -520,8 +520,9 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed);
-static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type,
- int dbm);
+static int rndis_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ int mbm);
static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm);
static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
@@ -1856,20 +1857,25 @@ static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed)
return 0;
}
-static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type,
- int dbm)
+static int rndis_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ int mbm)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n",
- __func__, type, dbm);
+ netdev_dbg(usbdev->net, "%s(): type:0x%x mbm:%i\n",
+ __func__, type, mbm);
+
+ if (mbm < 0 || (mbm % 100))
+ return -ENOTSUPP;
/* Device doesn't support changing txpower after initialization, only
* turn off/on radio. Support 'auto' mode and setting same dBm that is
* currently used.
*/
- if (type == TX_POWER_AUTOMATIC || dbm == get_bcm4320_power_dbm(priv)) {
+ if (type == NL80211_TX_POWER_AUTOMATIC ||
+ MBM_TO_DBM(mbm) == get_bcm4320_power_dbm(priv)) {
if (!priv->radio_on)
disassociate(usbdev, true); /* turn on radio */
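For context on the rndis_wlan change above: the cfg80211 set_tx_power callback
now receives the requested power in mBm (hundredths of a dBm) instead of dBm,
which is why the driver rejects negative or fractional-dBm requests and
converts with MBM_TO_DBM() before comparing against the firmware's dBm value.
A minimal sketch of the same validate-and-convert step (illustrative only;
example_mbm_to_dbm is a made-up name, not part of the patch):

	/* Reject anything that is not a whole, non-negative dBm value,
	 * then convert mBm to dBm (MBM_TO_DBM() simply divides by 100). */
	static int example_mbm_to_dbm(int mbm, int *dbm)
	{
		if (mbm < 0 || (mbm % 100))
			return -ENOTSUPP;	/* as in the patch above */
		*dbm = mbm / 100;	/* equivalent to MBM_TO_DBM(mbm) */
		return 0;
	}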
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 1eb882e..3bedf56 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1229,7 +1229,7 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
}
txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
- rt2x00pci_txdone(entry, &txdesc);
+ rt2x00lib_txdone(entry, &txdesc);
}
}
@@ -1588,7 +1588,6 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
.reset_tuner = rt2400pci_reset_tuner,
.link_tuner = rt2400pci_link_tuner,
.write_tx_desc = rt2400pci_write_tx_desc,
- .write_tx_data = rt2x00pci_write_tx_data,
.write_beacon = rt2400pci_write_beacon,
.kick_tx_queue = rt2400pci_kick_tx_queue,
.kill_tx_queue = rt2400pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index a29cb212..69d231d 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1365,7 +1365,7 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
}
txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
- rt2x00pci_txdone(entry, &txdesc);
+ rt2x00lib_txdone(entry, &txdesc);
}
}
@@ -1886,7 +1886,6 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
.reset_tuner = rt2500pci_reset_tuner,
.link_tuner = rt2500pci_link_tuner,
.write_tx_desc = rt2500pci_write_tx_desc,
- .write_tx_data = rt2x00pci_write_tx_data,
.write_beacon = rt2500pci_write_beacon,
.kick_tx_queue = rt2500pci_kick_tx_queue,
.kill_tx_queue = rt2500pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 002db64..4420552 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -347,6 +347,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
{
u32 mask;
u16 reg;
+ enum cipher curr_cipher;
if (crypto->cmd == SET_KEY) {
/*
@@ -357,6 +358,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
mask = TXRX_CSR0_KEY_ID.bit_mask;
rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM);
reg &= mask;
if (reg && reg == mask)
@@ -365,6 +367,14 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID);
key->hw_key_idx += reg ? ffz(reg) : 0;
+ /*
+ * Hardware requires that all keys use the same cipher
+ * (e.g. TKIP-only, AES-only, but not TKIP+AES).
+ * If this is not the first key, compare the cipher with the
+ * first one and fall back to SW crypto if not the same.
+ */
+ if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher)
+ return -EOPNOTSUPP;
rt2500usb_register_multiwrite(rt2x00dev, reg,
crypto->key, sizeof(crypto->key));
@@ -1769,7 +1779,6 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
.link_stats = rt2500usb_link_stats,
.reset_tuner = rt2500usb_reset_tuner,
.write_tx_desc = rt2500usb_write_tx_desc,
- .write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt2500usb_write_beacon,
.get_tx_data_len = rt2500usb_get_tx_data_len,
.kick_tx_queue = rt2x00usb_kick_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 317b780..552f9f4 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1436,6 +1436,10 @@ struct mac_iveiv_entry {
#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)
+#define MAC_WCID_ATTRIBUTE_CIPHER_EXT FIELD32(0x00000400)
+#define MAC_WCID_ATTRIBUTE_BSS_IDX_EXT FIELD32(0x00000800)
+#define MAC_WCID_ATTRIBUTE_WAPI_MCBC FIELD32(0x00008000)
+#define MAC_WCID_ATTRIBUTE_WAPI_KEY_IDX FIELD32(0xff000000)
/*
* SHARED_KEY_MODE:
@@ -1557,7 +1561,9 @@ struct mac_iveiv_entry {
*/
/*
- * BBP 1: TX Antenna
+ * BBP 1: TX Antenna & Power
+ * POWER: 0 - normal, 1 - drop tx power by 6dB, 2 - drop tx power by 12dB,
+ * 3 - increase tx power by 6dB
*/
#define BBP1_TX_POWER FIELD8(0x07)
#define BBP1_TX_ANTENNA FIELD8(0x18)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index ae20e67..d3cf0cc 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1,9 +1,9 @@
/*
+ Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
Based on the original rt2800pci.c and rt2800usb.c.
- Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
@@ -41,10 +41,6 @@
#include "rt2800lib.h"
#include "rt2800.h"
-MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("rt2800 library");
-MODULE_LICENSE("GPL");
-
/*
* Register access.
* All access to the CSR registers will go through the methods
@@ -103,8 +99,7 @@ static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
- rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
}
@@ -132,8 +127,7 @@ static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
- rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
+ rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
@@ -436,6 +430,20 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
}
EXPORT_SYMBOL(rt2800_write_beacon);
+static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
+ unsigned int beacon_base)
+{
+ int i;
+
+ /*
+ * For the Beacon base registers we only need to clear
+ * the whole TXWI which (when set to 0) will invalidate
+ * the entire beacon.
+ */
+ for (i = 0; i < TXWI_DESC_SIZE; i += sizeof(__le32))
+ rt2800_register_write(rt2x00dev, beacon_base + i, 0);
+}
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -558,15 +566,28 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
- rt2800_register_read(rt2x00dev, offset, &reg);
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
- !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
- (crypto->cmd == SET_KEY) * crypto->cipher);
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
- (crypto->cmd == SET_KEY) * crypto->bssidx);
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
- rt2800_register_write(rt2x00dev, offset, reg);
+ if (crypto->cmd == SET_KEY) {
+ rt2800_register_read(rt2x00dev, offset, &reg);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ /*
+	 * Both the cipher and the BSS Idx numbers are split into a main
+	 * value of 3 bits, and an extended field for adding one additional
+	 * bit to the value.
+ */
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
+ (crypto->cipher & 0x7));
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT,
+ (crypto->cipher & 0x8) >> 3);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
+ (crypto->bssidx & 0x7));
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT,
+ (crypto->bssidx & 0x8) >> 3);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
+ rt2800_register_write(rt2x00dev, offset, reg);
+ } else {
+ rt2800_register_write(rt2x00dev, offset, 0);
+ }
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
@@ -724,19 +745,14 @@ EXPORT_SYMBOL_GPL(rt2800_config_filter);
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
struct rt2x00intf_conf *conf, const unsigned int flags)
{
- unsigned int beacon_base;
u32 reg;
if (flags & CONFIG_UPDATE_TYPE) {
/*
* Clear current synchronisation setup.
- * For the Beacon base registers we only need to clear
- * the first byte since that byte contains the VALID and OWNER
- * bits which (when set to 0) will invalidate the entire beacon.
*/
- beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
- rt2800_register_write(rt2x00dev, beacon_base, 0);
-
+ rt2800_clear_beacon(rt2x00dev,
+ HW_BEACON_OFFSET(intf->beacon->entry_idx));
/*
* Enable synchronisation.
*/
@@ -759,8 +775,8 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
if (flags & CONFIG_UPDATE_BSSID) {
reg = le32_to_cpu(conf->bssid[1]);
- rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
- rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
+ rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
+ rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7);
conf->bssid[1] = cpu_to_le32(reg);
rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
@@ -818,14 +834,12 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
switch ((int)ant->tx) {
case 1:
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
- rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
break;
case 2:
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
break;
case 3:
- /* Do nothing */
+ rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
break;
}
@@ -1079,7 +1093,7 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
u8 r1;
rt2800_bbp_read(rt2x00dev, 1, &r1);
- rt2x00_set_field8(&reg, BBP1_TX_POWER, 0);
+ rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
rt2800_bbp_write(rt2x00dev, 1, r1);
rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
@@ -1556,18 +1570,15 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
/*
* Clear all beacons
- * For the Beacon base registers we only need to clear
- * the first byte since that byte contains the VALID and OWNER
- * bits which (when set to 0) will invalidate the entire beacon.
*/
- rt2800_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
- rt2800_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
- rt2800_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
- rt2800_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
- rt2800_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
- rt2800_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
- rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
- rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
+ rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0);
+ rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1);
+ rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2);
+ rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3);
+ rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4);
+ rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5);
+ rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6);
+ rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7);
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
@@ -2176,6 +2187,8 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_ANT_DIVERSITY, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_DAC_TEST, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
}
@@ -2183,6 +2196,10 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
if ((word & 0x00ff) == 0x00ff) {
rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+ EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
+ }
+ if ((word & 0xff00) == 0xff00) {
rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
LED_MODE_TXRX_ACTIVITY);
rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
@@ -2190,7 +2207,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
- EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
+ EEPROM(rt2x00dev, "Led Mode: 0x%04x\n", word);
}
/*
@@ -2490,13 +2507,26 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_AMPDU_AGGREGATION;
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2x00_eeprom_addr(rt2x00dev,
EEPROM_MAC_ADDR_0));
+ /*
+ * As rt2800 has a global fallback table we cannot specify
+	 * more than one tx rate per frame but since the hw will
+ * try several rates (based on the fallback table) we should
+ * still initialize max_rates to the maximum number of rates
+ * we are going to try. Otherwise mac80211 will truncate our
+	 * reported tx rates and the rc algorithm will end up with
+ * incorrect data.
+ */
+ rt2x00dev->hw->max_rates = 7;
+ rt2x00dev->hw->max_rate_tries = 1;
+
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
/*
@@ -2538,12 +2568,15 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_RX_STBC;
+ IEEE80211_HT_CAP_SGI_40;
if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) >= 2)
spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
+ spec->ht.cap |=
+ rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) <<
+ IEEE80211_HT_CAP_RX_STBC_SHIFT;
+
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
@@ -2730,6 +2763,35 @@ static u64 rt2800_get_tsf(struct ieee80211_hw *hw)
return tsf;
}
+static int rt2800_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn)
+{
+ int ret = 0;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ /* we don't support RX aggregation yet */
+ ret = -ENOTSUPP;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ break;
+ default:
+ WARNING((struct rt2x00_dev *)hw->priv, "Unknown AMPDU action\n");
+ }
+
+ return ret;
+}
+
const struct ieee80211_ops rt2800_mac80211_ops = {
.tx = rt2x00mac_tx,
.start = rt2x00mac_start,
@@ -2747,5 +2809,11 @@ const struct ieee80211_ops rt2800_mac80211_ops = {
.conf_tx = rt2800_conf_tx,
.get_tsf = rt2800_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
+ .ampdu_action = rt2800_ampdu_action,
};
EXPORT_SYMBOL_GPL(rt2800_mac80211_ops);
+
+MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink RT2800 library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b5a871e..6f11760 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -51,7 +51,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 1;
+static int modparam_nohwcrypt = 0;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -139,8 +139,18 @@ static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
eeprom.data = rt2x00dev;
eeprom.register_read = rt2800pci_eepromregister_read;
eeprom.register_write = rt2800pci_eepromregister_write;
- eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ?
- PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66;
+ switch (rt2x00_get_field32(reg, E2PROM_CSR_TYPE))
+ {
+ case 0:
+ eeprom.width = PCI_EEPROM_WIDTH_93C46;
+ break;
+ case 1:
+ eeprom.width = PCI_EEPROM_WIDTH_93C66;
+ break;
+ default:
+ eeprom.width = PCI_EEPROM_WIDTH_93C86;
+ break;
+ }
eeprom.reg_data_in = 0;
eeprom.reg_data_out = 0;
eeprom.reg_data_clock = 0;
@@ -645,10 +655,12 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
-static void rt2800pci_write_tx_datadesc(struct queue_entry* entry,
- struct txentry_desc *txdesc)
+static void rt2800pci_write_tx_data(struct queue_entry* entry,
+ struct txentry_desc *txdesc)
{
- rt2800_write_txwi((__le32 *) entry->skb->data, txdesc);
+ __le32 *txwi = (__le32 *) entry->skb->data;
+
+ rt2800_write_txwi(txwi, txdesc);
}
@@ -813,29 +825,24 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
- u32 old_reg;
int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
u16 mcs, real_mcs;
+ int i;
/*
- * During each loop we will compare the freshly read
- * TX_STA_FIFO register value with the value read from
- * the previous loop. If the 2 values are equal then
- * we should stop processing because the chance it
- * quite big that the device has been unplugged and
- * we risk going into an endless loop.
+ * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
+ * at most X times and also stop processing once the TX_STA_FIFO_VALID
+ * flag is not set anymore.
+ *
+ * The legacy drivers use X=TX_RING_SIZE but state in a comment
+ * that the TX_STA_FIFO stack has a size of 16. We stick to our
+ * tx ring size for now.
*/
- old_reg = 0;
-
- while (1) {
+ for (i = 0; i < TX_ENTRIES; i++) {
rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
break;
- if (old_reg == reg)
- break;
- old_reg = reg;
-
wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -903,10 +910,14 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
txdesc.retry = 7;
}
- __set_bit(TXDONE_FALLBACK, &txdesc.flags);
-
+ /*
+ * the frame was retried at least once
+ * -> hw used fallback rates
+ */
+ if (txdesc.retry)
+ __set_bit(TXDONE_FALLBACK, &txdesc.flags);
- rt2x00pci_txdone(entry, &txdesc);
+ rt2x00lib_txdone(entry, &txdesc);
}
}
@@ -942,6 +953,12 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
rt2800pci_txdone(rt2x00dev);
+ /*
+ * Current beacon was sent out, fetch the next one
+ */
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+ rt2x00lib_beacondone(rt2x00dev);
+
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
rt2800pci_wakeup(rt2x00dev);
@@ -1045,8 +1062,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
.write_tx_desc = rt2800pci_write_tx_desc,
- .write_tx_data = rt2x00pci_write_tx_data,
- .write_tx_datadesc = rt2800pci_write_tx_datadesc,
+ .write_tx_data = rt2800pci_write_tx_data,
.write_beacon = rt2800_write_beacon,
.kick_tx_queue = rt2800pci_kick_tx_queue,
.kill_tx_queue = rt2800pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index c437960..4f85f7b 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 1;
+static int modparam_nohwcrypt = 0;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -430,21 +430,24 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
+static void rt2800usb_write_tx_data(struct queue_entry* entry,
+ struct txentry_desc *txdesc)
+{
+ __le32 *txwi = (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE);
+
+ rt2800_write_txwi(txwi, txdesc);
+}
+
+
static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txi = (__le32 *) skb->data;
- __le32 *txwi = (__le32 *) (skb->data + TXINFO_DESC_SIZE);
u32 word;
/*
- * Initialize TXWI descriptor
- */
- rt2800_write_txwi(txwi, txdesc);
-
- /*
* Initialize TXINFO descriptor
*/
rt2x00_desc_read(txi, 0, &word);
@@ -652,7 +655,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
.write_tx_desc = rt2800usb_write_tx_desc,
- .write_tx_data = rt2x00usb_write_tx_data,
+ .write_tx_data = rt2800usb_write_tx_data,
.write_beacon = rt2800_write_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
.kick_tx_queue = rt2x00usb_kick_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e7acc6a..788b0e4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -550,10 +550,8 @@ struct rt2x00lib_ops {
void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc);
- int (*write_tx_data) (struct queue_entry *entry,
- struct txentry_desc *txdesc);
- void (*write_tx_datadesc) (struct queue_entry *entry,
- struct txentry_desc *txdesc);
+ void (*write_tx_data) (struct queue_entry *entry,
+ struct txentry_desc *txdesc);
void (*write_beacon) (struct queue_entry *entry,
struct txentry_desc *txdesc);
int (*get_tx_data_len) (struct queue_entry *entry);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 0b8efe8..12ee7bd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -211,6 +211,21 @@ void rt2x00lib_txdone(struct queue_entry *entry,
bool success;
/*
+ * Unmap the skb.
+ */
+ rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
+
+ /*
+ * Remove the extra tx headroom from the skb.
+ */
+ skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Signal that the TX descriptor is no longer in the skb.
+ */
+ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
+ /*
* Remove L2 padding which was added during
*/
if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
@@ -236,8 +251,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
*/
success =
test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
- test_bit(TXDONE_UNKNOWN, &txdesc->flags) ||
- test_bit(TXDONE_FALLBACK, &txdesc->flags);
+ test_bit(TXDONE_UNKNOWN, &txdesc->flags);
/*
* Update TX statistics.
@@ -259,11 +273,22 @@ void rt2x00lib_txdone(struct queue_entry *entry,
/*
* Frame was send with retries, hardware tried
* different rates to send out the frame, at each
- * retry it lowered the rate 1 step.
+ * retry it lowered the rate 1 step except when the
+ * lowest rate was used.
*/
for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
tx_info->status.rates[i].idx = rate_idx - i;
tx_info->status.rates[i].flags = rate_flags;
+
+ if (rate_idx - i == 0) {
+ /*
+ * The lowest rate (index 0) was used until the
+ * number of max retries was reached.
+ */
+ tx_info->status.rates[i].count = retry_rates - i;
+ i++;
+ break;
+ }
tx_info->status.rates[i].count = 1;
}
if (i < (IEEE80211_TX_MAX_RATES - 1))
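A worked example of the rate-reporting loop above (illustrative, assuming
IEEE80211_TX_MAX_RATES is at least 5): if the frame went out at rate_idx 4 and
the hardware reports retry_rates = 6, the loop fills tx_info->status.rates
with idx 4, 3, 2 and 1 at count 1 each, then reaches idx 0 and stores
count = retry_rates - i = 2 there, reflecting that the hardware keeps retrying
at the lowest rate once it reaches the bottom of the fallback table.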
@@ -276,6 +301,21 @@ void rt2x00lib_txdone(struct queue_entry *entry,
rt2x00dev->low_level_stats.dot11ACKFailureCount++;
}
+ /*
+	 * Every single frame has its own tx status, hence report
+ * every frame as ampdu of size 1.
+ *
+ * TODO: if we can find out how many frames were aggregated
+ * by the hw we could provide the real ampdu_len to mac80211
+ * which would allow the rc algorithm to better decide on
+ * which rates are suitable.
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+ tx_info->status.ampdu_len = 1;
+ tx_info->status.ampdu_ack_len = success ? 1 : 0;
+ }
+
if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
if (success)
rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 0efbf5a..2f8136c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -271,11 +271,11 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
/*
* Link tuning should only be performed when
- * an active sta or master interface exists.
- * Single monitor mode interfaces should never have
- * work with link tuners.
+ * an active sta interface exists. AP interfaces
+ * don't need link tuning and monitor mode interfaces
+ * should never have to work with link tuners.
*/
- if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)
+ if (!rt2x00dev->intf_sta_count)
return;
rt2x00link_reset_tuner(rt2x00dev, false);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index abbd857..3b838c0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -282,7 +282,8 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
* has been initialized. Otherwise the device can reset
* the MAC registers.
*/
- rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
+ rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
+ intf->mac, intf->bssid);
/*
* Some filters depend on the current working mode. We can force
@@ -562,7 +563,6 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(vif);
- int update_bssid = 0;
/*
* mac80211 might be calling this function while we are trying
@@ -577,10 +577,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
* conf->bssid can be NULL if coming from the internal
* beacon update routine.
*/
- if (changes & BSS_CHANGED_BSSID) {
- update_bssid = 1;
+ if (changes & BSS_CHANGED_BSSID)
memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN);
- }
spin_unlock(&intf->lock);
@@ -592,7 +590,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
*/
if (changes & BSS_CHANGED_BSSID)
rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
- update_bssid ? bss_conf->bssid : NULL);
+ bss_conf->bssid);
/*
* Update the beacon.
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 10eaffd..fc9da83 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -60,80 +60,6 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
-/*
- * TX data handlers.
- */
-int rt2x00pci_write_tx_data(struct queue_entry *entry,
- struct txentry_desc *txdesc)
-{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-
- /*
- * This should not happen, we already checked the entry
- * was ours. When the hardware disagrees there has been
- * a queue corruption!
- */
- if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) {
- ERROR(rt2x00dev,
- "Corrupt queue %d, accessing entry which is not ours.\n"
- "Please file bug report to %s.\n",
- entry->queue->qid, DRV_PROJECT);
- return -EINVAL;
- }
-
- /*
- * Add the requested extra tx headroom in front of the skb.
- */
- skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
- memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
-
- /*
- * Call the driver's write_tx_datadesc function, if it exists.
- */
- if (rt2x00dev->ops->lib->write_tx_datadesc)
- rt2x00dev->ops->lib->write_tx_datadesc(entry, txdesc);
-
- /*
- * Map the skb to DMA.
- */
- if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
- rt2x00queue_map_txskb(rt2x00dev, entry->skb);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
-
-/*
- * TX/RX data handlers.
- */
-void rt2x00pci_txdone(struct queue_entry *entry,
- struct txdone_entry_desc *txdesc)
-{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
-
- /*
- * Unmap the skb.
- */
- rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
-
- /*
- * Remove the extra tx headroom from the skb.
- */
- skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);
-
- /*
- * Signal that the TX descriptor is no longer in the skb.
- */
- skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
-
- /*
- * Pass on to rt2x00lib.
- */
- rt2x00lib_txdone(entry, txdesc);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
-
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue = rt2x00dev->rx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 00528b8..b854d62 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -86,16 +86,6 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
u32 *reg);
/**
- * rt2x00pci_write_tx_data - Initialize data for TX operation
- * @entry: The entry where the frame is located
- *
- * This function will initialize the DMA and skb descriptor
- * to prepare the entry for the actual TX operation.
- */
-int rt2x00pci_write_tx_data(struct queue_entry *entry,
- struct txentry_desc *txdesc);
-
-/**
* struct queue_entry_priv_pci: Per entry PCI specific information
*
* @desc: Pointer to device descriptor
@@ -109,14 +99,6 @@ struct queue_entry_priv_pci {
};
/**
- * rt2x00pci_txdone - Handle TX done events.
- * @entry: The queue entry for which a TX done event was received.
- * @txdesc: The TX done descriptor for the entry.
- */
-void rt2x00pci_txdone(struct queue_entry *entry,
- struct txdone_entry_desc *txdesc);
-
-/**
* rt2x00pci_rxdone - Handle RX done events
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 35858b1..5097fe0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -353,13 +353,18 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Check if more fragments are pending
*/
- if (ieee80211_has_morefrags(hdr->frame_control) ||
- (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
+ if (ieee80211_has_morefrags(hdr->frame_control)) {
__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
}
/*
+ * Check if more frames (!= fragments) are pending
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
+ __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
+
+ /*
* Beacons and probe responses require the tsf timestamp
* to be inserted into the frame, except for a frame that has been injected
* through a monitor interface. This latter is needed for testing a
@@ -399,6 +404,46 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
+static int rt2x00queue_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+
+ /*
+ * This should not happen, we already checked the entry
+ * was ours. When the hardware disagrees there has been
+ * a queue corruption!
+ */
+ if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
+ rt2x00dev->ops->lib->get_entry_state(entry))) {
+ ERROR(rt2x00dev,
+ "Corrupt queue %d, accessing entry which is not ours.\n"
+ "Please file bug report to %s.\n",
+ entry->queue->qid, DRV_PROJECT);
+ return -EINVAL;
+ }
+
+ /*
+ * Add the requested extra tx headroom in front of the skb.
+ */
+ skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+ memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Call the driver's write_tx_data function, if it exists.
+ */
+ if (rt2x00dev->ops->lib->write_tx_data)
+ rt2x00dev->ops->lib->write_tx_data(entry, txdesc);
+
+ /*
+ * Map the skb to DMA.
+ */
+ if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+ rt2x00queue_map_txskb(rt2x00dev, entry->skb);
+
+ return 0;
+}
+
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
@@ -510,8 +555,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* call failed. Since we always return NETDEV_TX_OK to mac80211,
* this frame will simply be dropped.
*/
- if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
- &txdesc))) {
+ if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
entry->skb = NULL;
return -EIO;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index f791708..bd54f55 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -213,9 +213,16 @@ struct rxdone_entry_desc {
/**
* enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
*
+ * Every txdone report has to contain the basic result of the
+ * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
+ * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
+ * conjunction with all of these flags but should only be set
+ * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
+ * in conjunction with &TXDONE_FAILURE.
+ *
* @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
* @TXDONE_SUCCESS: Frame was successfully send
- * @TXDONE_FALLBACK: Frame was successfully send using a fallback rate.
+ * @TXDONE_FALLBACK: Hardware used fallback rates for retries
* @TXDONE_FAILURE: Frame was not successfully send
* @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
* frame transmission failed due to excessive retries.
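To make the flag rules in the updated kernel-doc concrete, here is a minimal
sketch of a txdone report that follows them (illustrative only; status_ok and
retry_count_from_hw are placeholder names, not symbols from the patch):

	struct txdone_entry_desc txdesc;

	txdesc.flags = 0;
	txdesc.retry = retry_count_from_hw;
	if (status_ok)			/* exactly one basic result */
		__set_bit(TXDONE_SUCCESS, &txdesc.flags);
	else
		__set_bit(TXDONE_FAILURE, &txdesc.flags);
	if (txdesc.retry)		/* fallback only when retries > 0 */
		__set_bit(TXDONE_FALLBACK, &txdesc.flags);
	rt2x00lib_txdone(entry, &txdesc);

This mirrors the rt2800pci and rt61pci txdone changes earlier in this patch,
which now set TXDONE_FALLBACK only when txdesc.retry is non-zero.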
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b45bc24..a22837c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -178,11 +178,6 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
return;
/*
- * Remove the descriptor from the front of the skb.
- */
- skb_pull(entry->skb, entry->queue->desc_size);
-
- /*
* Obtain the status about this packet.
* Note that when the status is 0 it does not mean the
* frame was send out correctly. It only means the frame
@@ -201,48 +196,28 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
rt2x00lib_txdone(entry, &txdesc);
}
-int rt2x00usb_write_tx_data(struct queue_entry *entry,
- struct txentry_desc *txdesc)
+static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
u32 length;
- /*
- * Add the descriptor in front of the skb.
- */
- skb_push(entry->skb, entry->queue->desc_size);
- memset(entry->skb->data, 0, entry->queue->desc_size);
-
- /*
- * USB devices cannot blindly pass the skb->len as the
- * length of the data to usb_fill_bulk_urb. Pass the skb
- * to the driver to determine what the length should be.
- */
- length = rt2x00dev->ops->lib->get_tx_data_len(entry);
-
- usb_fill_bulk_urb(entry_priv->urb, usb_dev,
- usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
- entry->skb->data, length,
- rt2x00usb_interrupt_txdone, entry);
-
- /*
- * Call the driver's write_tx_datadesc function, if it exists.
- */
- if (rt2x00dev->ops->lib->write_tx_datadesc)
- rt2x00dev->ops->lib->write_tx_datadesc(entry, txdesc);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
+ if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) {
+ /*
+ * USB devices cannot blindly pass the skb->len as the
+ * length of the data to usb_fill_bulk_urb. Pass the skb
+ * to the driver to determine what the length should be.
+ */
+ length = rt2x00dev->ops->lib->get_tx_data_len(entry);
-static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
-{
- struct queue_entry_priv_usb *entry_priv = entry->priv_data;
+ usb_fill_bulk_urb(entry_priv->urb, usb_dev,
+ usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
+ entry->skb->data, length,
+ rt2x00usb_interrupt_txdone, entry);
- if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+ }
}
void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 255b81e..2b7a188 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -351,16 +351,6 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
/**
- * rt2x00usb_write_tx_data - Initialize URB for TX operation
- * @entry: The entry where the frame is located
- *
- * This function will initialize the URB and skb descriptor
- * to prepare the entry for the actual TX operation.
- */
-int rt2x00usb_write_tx_data(struct queue_entry *entry,
- struct txentry_desc *txdesc);
-
-/**
* struct queue_entry_priv_usb: Per entry USB specific information
*
* @urb: Urb structure used for device communication.
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 243df08..0123fbc 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -931,6 +931,9 @@ static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
u32 reg;
rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
libconf->conf->long_frame_max_tx_count);
rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
@@ -2049,29 +2052,24 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
- u32 old_reg;
int type;
int index;
+ int i;
/*
- * During each loop we will compare the freshly read
- * STA_CSR4 register value with the value read from
- * the previous loop. If the 2 values are equal then
- * we should stop processing because the chance is
- * quite big that the device has been unplugged and
- * we risk going into an endless loop.
+ * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
+ * at most X times and also stop processing once the TX_STA_FIFO_VALID
+ * flag is not set anymore.
+ *
+ * The legacy drivers use X=TX_RING_SIZE but state in a comment
+ * that the TX_STA_FIFO stack has a size of 16. We stick to our
+ * tx ring size for now.
*/
- old_reg = 0;
-
- while (1) {
+ for (i = 0; i < TX_ENTRIES; i++) {
rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
break;
- if (old_reg == reg)
- break;
- old_reg = reg;
-
/*
* Skip this entry when it contains an invalid
* queue identication number.
@@ -2110,7 +2108,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
txdesc.retry = 0;
- rt2x00pci_txdone(entry_done, &txdesc);
+ rt2x00lib_txdone(entry_done, &txdesc);
entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
}
@@ -2130,7 +2128,14 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
}
txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
- rt2x00pci_txdone(entry, &txdesc);
+ /*
+ * the frame was retried at least once
+ * -> hw used fallback rates
+ */
+ if (txdesc.retry)
+ __set_bit(TXDONE_FALLBACK, &txdesc.flags);
+
+ rt2x00lib_txdone(entry, &txdesc);
}
}
@@ -2195,6 +2200,12 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
rt61pci_wakeup(rt2x00dev);
+ /*
+ * 5 - Beacon done interrupt.
+ */
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
+ rt2x00lib_beacondone(rt2x00dev);
+
return IRQ_HANDLED;
}
@@ -2587,6 +2598,18 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
EEPROM_MAC_ADDR_0));
/*
+ * As rt61 has a global fallback table we cannot specify
+	 * more than one tx rate per frame but since the hw will
+ * try several rates (based on the fallback table) we should
+ * still initialize max_rates to the maximum number of rates
+ * we are going to try. Otherwise mac80211 will truncate our
+	 * reported tx rates and the rc algorithm will end up with
+ * incorrect data.
+ */
+ rt2x00dev->hw->max_rates = 7;
+ rt2x00dev->hw->max_rate_tries = 1;
+
+ /*
* Initialize hw_mode information.
*/
spec->supported_bands = SUPPORT_BAND_2GHZ;
@@ -2783,7 +2806,6 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
.reset_tuner = rt61pci_reset_tuner,
.link_tuner = rt61pci_link_tuner,
.write_tx_desc = rt61pci_write_tx_desc,
- .write_tx_data = rt2x00pci_write_tx_data,
.write_beacon = rt61pci_write_beacon,
.kick_tx_queue = rt61pci_kick_tx_queue,
.kill_tx_queue = rt61pci_kill_tx_queue,
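
The TXDONE_FALLBACK flag set in rt61pci_txdone() only tells rt2x00lib that the hardware walked its global fallback table; the rate series that eventually reaches mac80211's rate-control algorithm is easier to picture with a small sketch. The program below is illustrative only and not rt2x00 code: the fallback table, report_tx_rates() and MAX_REPORTED_RATES are invented names, and the assumption is simply that the hardware steps one table entry down per retry, which is why max_rates above has to cover the whole chain even though only one rate per frame can be requested.

/* Illustrative userspace sketch, not rt2x00 code: expand one requested
 * rate plus a hardware retry count into the series of rates the device
 * is assumed to have tried, one fallback-table step per retry. */
#include <stdio.h>

#define MAX_REPORTED_RATES 7	/* mirrors hw->max_rates in the patch */

struct rate_report {
	int rate_idx;		/* index into the hypothetical table */
	int tries;
};

static const int fallback_table[MAX_REPORTED_RATES] = { 6, 5, 4, 3, 2, 1, 0 };

static int report_tx_rates(int first_entry, int retries, int used_fallback,
			   struct rate_report *out)
{
	int n = 0, entry = first_entry, attempts = retries + 1;

	while (attempts-- > 0 && n < MAX_REPORTED_RATES) {
		out[n].rate_idx = fallback_table[entry];
		out[n].tries = 1;		/* one try per table entry */
		n++;
		if (used_fallback && entry < MAX_REPORTED_RATES - 1)
			entry++;		/* step down one rate */
	}
	return n;
}

int main(void)
{
	struct rate_report r[MAX_REPORTED_RATES];
	int i, n = report_tx_rates(0, 2, 1, r);	/* 2 retries -> 3 rates */

	for (i = 0; i < n; i++)
		printf("rate index %d, tries %d\n", r[i].rate_idx, r[i].tries);
	return 0;
}
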
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 113ad69..286dd97 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -816,6 +816,9 @@ static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
u32 reg;
rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
libconf->conf->long_frame_max_tx_count);
rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
@@ -2246,7 +2249,6 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.reset_tuner = rt73usb_reset_tuner,
.link_tuner = rt73usb_link_tuner,
.write_tx_desc = rt73usb_write_tx_desc,
- .write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt73usb_write_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
.kick_tx_queue = rt2x00usb_kick_tx_queue,
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 515817d..4270502 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -671,7 +671,7 @@ static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
(u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
}
-void rtl8180_beacon_work(struct work_struct *work)
+static void rtl8180_beacon_work(struct work_struct *work)
{
struct rtl8180_vif *vif_priv =
container_of(work, struct rtl8180_vif, beacon_work.work);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 163a8a0..43307bd 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -42,7 +42,8 @@ static struct zd_reg_alpha2_map reg_alpha2_map[] = {
{ ZD_REGDOMAIN_IC, "CA" },
{ ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
{ ZD_REGDOMAIN_JAPAN, "JP" },
- { ZD_REGDOMAIN_JAPAN_ADD, "JP" },
+ { ZD_REGDOMAIN_JAPAN_2, "JP" },
+ { ZD_REGDOMAIN_JAPAN_3, "JP" },
{ ZD_REGDOMAIN_SPAIN, "ES" },
{ ZD_REGDOMAIN_FRANCE, "FR" },
};
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index e4c70e3..a6d86b9 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -212,8 +212,9 @@ struct zd_mac {
#define ZD_REGDOMAIN_ETSI 0x30
#define ZD_REGDOMAIN_SPAIN 0x31
#define ZD_REGDOMAIN_FRANCE 0x32
-#define ZD_REGDOMAIN_JAPAN_ADD 0x40
+#define ZD_REGDOMAIN_JAPAN_2 0x40
#define ZD_REGDOMAIN_JAPAN 0x41
+#define ZD_REGDOMAIN_JAPAN_3 0x49
enum {
MIN_CHANNEL24 = 1,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 7a44c38..d798927 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -40,11 +40,7 @@
*/
enum qeth_dbf_names {
QETH_DBF_SETUP,
- QETH_DBF_QERR,
- QETH_DBF_TRACE,
QETH_DBF_MSG,
- QETH_DBF_SENSE,
- QETH_DBF_MISC,
QETH_DBF_CTRL,
QETH_DBF_INFOS /* must be last element */
};
@@ -71,7 +67,19 @@ struct qeth_dbf_info {
debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
#define QETH_DBF_TEXT_(name, level, text...) \
- qeth_dbf_longtext(QETH_DBF_##name, level, text)
+ qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_CARD_TEXT(card, level, text) \
+ debug_text_event(card->debug, level, text)
+
+#define QETH_CARD_HEX(card, level, addr, len) \
+ debug_event(card->debug, level, (void *)(addr), len)
+
+#define QETH_CARD_MESSAGE(card, text...) \
+ debug_sprintf_event(card->debug, level, text)
+
+#define QETH_CARD_TEXT_(card, level, text...) \
+ qeth_dbf_longtext(card->debug, level, text)
#define SENSE_COMMAND_REJECT_BYTE 0
#define SENSE_COMMAND_REJECT_FLAG 0x80
@@ -738,6 +746,7 @@ struct qeth_card {
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
+ debug_info_t *debug;
struct mutex conf_mutex;
};
@@ -857,9 +866,10 @@ void qeth_core_get_ethtool_stats(struct net_device *,
struct ethtool_stats *, u64 *);
void qeth_core_get_strings(struct net_device *, u32, u8 *);
void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
-void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
+void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
int qeth_set_access_ctrl_online(struct qeth_card *card);
+int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
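
The card->debug pointer and the QETH_CARD_* macros added above sit on top of the existing s390 debug facility (debug_text_event(), debug_event(), debug_sprintf_event()). As a reference point, here is a hedged, minimal sketch of that facility used the way the patch uses it for per-card areas; it is a hypothetical module, not part of the patch, and only the sizing (2 pages, 1 area, 8-byte entries, hex/ASCII view) is taken from qeth_core_probe_device() below.

/* Hypothetical s390 module sketch of the per-device debug-area pattern
 * introduced by this patch: register one area per object, attach the
 * hex_ascii view, log short fixed tags, unregister on exit. */
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/debug.h>

static debug_info_t *example_dbf;

static int __init example_dbf_init(void)
{
	example_dbf = debug_register("example_card_0", 2, 1, 8);
	if (!example_dbf)
		return -ENOMEM;
	debug_register_view(example_dbf, &debug_hex_ascii_view);
	debug_set_level(example_dbf, 2);		/* keep levels <= 2 */
	debug_text_event(example_dbf, 2, "init ok");	/* short text tag */
	return 0;
}

static void __exit example_dbf_exit(void)
{
	debug_unregister(example_dbf);
}

module_init(example_dbf_init);
module_exit(example_dbf_exit);
MODULE_LICENSE("GPL");
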
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 13ef46b..b701906 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -32,16 +32,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* N P A M L V H */
[QETH_DBF_SETUP] = {"qeth_setup",
8, 1, 8, 5, &debug_hex_ascii_view, NULL},
- [QETH_DBF_QERR] = {"qeth_qerr",
- 2, 1, 8, 2, &debug_hex_ascii_view, NULL},
- [QETH_DBF_TRACE] = {"qeth_trace",
- 4, 1, 8, 3, &debug_hex_ascii_view, NULL},
[QETH_DBF_MSG] = {"qeth_msg",
8, 1, 128, 3, &debug_sprintf_view, NULL},
- [QETH_DBF_SENSE] = {"qeth_sense",
- 2, 1, 64, 2, &debug_hex_ascii_view, NULL},
- [QETH_DBF_MISC] = {"qeth_misc",
- 2, 1, 256, 2, &debug_hex_ascii_view, NULL},
[QETH_DBF_CTRL] = {"qeth_control",
8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
@@ -65,48 +57,6 @@ static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
-static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
- struct qdio_buffer *buffer, int is_tso,
- int *next_element_to_fill)
-{
- struct skb_frag_struct *frag;
- int fragno;
- unsigned long addr;
- int element, cnt, dlen;
-
- fragno = skb_shinfo(skb)->nr_frags;
- element = *next_element_to_fill;
- dlen = 0;
-
- if (is_tso)
- buffer->element[element].flags =
- SBAL_FLAGS_MIDDLE_FRAG;
- else
- buffer->element[element].flags =
- SBAL_FLAGS_FIRST_FRAG;
- dlen = skb->len - skb->data_len;
- if (dlen) {
- buffer->element[element].addr = skb->data;
- buffer->element[element].length = dlen;
- element++;
- }
- for (cnt = 0; cnt < fragno; cnt++) {
- frag = &skb_shinfo(skb)->frags[cnt];
- addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
- frag->page_offset;
- buffer->element[element].addr = (char *)addr;
- buffer->element[element].length = frag->size;
- if (cnt < (fragno - 1))
- buffer->element[element].flags =
- SBAL_FLAGS_MIDDLE_FRAG;
- else
- buffer->element[element].flags =
- SBAL_FLAGS_LAST_FRAG;
- element++;
- }
- *next_element_to_fill = element;
-}
-
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
@@ -232,7 +182,7 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
- QETH_DBF_TEXT(TRACE, 5, "clwrklst");
+ QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.in_buf_pool.entry_list, list){
list_del(&pool_entry->list);
@@ -246,7 +196,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)
void *ptr;
int i, j;
- QETH_DBF_TEXT(TRACE, 5, "alocpool");
+ QETH_CARD_TEXT(card, 5, "alocpool");
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
if (!pool_entry) {
@@ -273,7 +223,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)
int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
- QETH_DBF_TEXT(TRACE, 2, "realcbp");
+ QETH_CARD_TEXT(card, 2, "realcbp");
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER))
@@ -293,7 +243,7 @@ static int qeth_issue_next_read(struct qeth_card *card)
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 5, "issnxrd");
+ QETH_CARD_TEXT(card, 5, "issnxrd");
if (card->read.state != CH_STATE_UP)
return -EIO;
iob = qeth_get_buffer(&card->read);
@@ -305,7 +255,7 @@ static int qeth_issue_next_read(struct qeth_card *card)
return -ENOMEM;
}
qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
- QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
+ QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
(addr_t) iob, 0, 0);
if (rc) {
@@ -364,7 +314,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd = NULL;
- QETH_DBF_TEXT(TRACE, 5, "chkipad");
+ QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA(iob->data)) {
cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
if (IS_IPA_REPLY(cmd)) {
@@ -400,10 +350,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_MODCCID:
return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
- QETH_DBF_TEXT(TRACE, 3, "irla");
+ QETH_CARD_TEXT(card, 3, "irla");
break;
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
- QETH_DBF_TEXT(TRACE, 3, "urla");
+ QETH_CARD_TEXT(card, 3, "urla");
break;
default:
QETH_DBF_MESSAGE(2, "Received data is IPA "
@@ -420,7 +370,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
struct qeth_reply *reply, *r;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "clipalst");
+ QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
@@ -448,9 +398,9 @@ static int qeth_check_idx_response(struct qeth_card *card,
buffer[4],
((buffer[4] == 0x22) ?
" -- try another portname" : ""));
- QETH_DBF_TEXT(TRACE, 2, "ckidxres");
- QETH_DBF_TEXT(TRACE, 2, " idxterm");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ QETH_CARD_TEXT(card, 2, "ckidxres");
+ QETH_CARD_TEXT(card, 2, " idxterm");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
if (buffer[4] == 0xf6) {
dev_err(&card->gdev->dev,
"The qeth device is not configured "
@@ -467,8 +417,8 @@ static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 4, "setupccw");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 4, "setupccw");
if (channel == &card->read)
memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
else
@@ -481,7 +431,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
__u8 index;
- QETH_DBF_TEXT(TRACE, 6, "getbuff");
+ QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
index = channel->io_buf_no;
do {
if (channel->iob[index].state == BUF_STATE_FREE) {
@@ -502,7 +452,7 @@ void qeth_release_buffer(struct qeth_channel *channel,
{
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 6, "relbuff");
+ QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
spin_lock_irqsave(&channel->iob_lock, flags);
memset(iob->data, 0, QETH_BUFSIZE);
iob->state = BUF_STATE_FREE;
@@ -553,9 +503,8 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
int keep_reply;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
-
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 4, "sndctlcb");
rc = qeth_check_idx_response(card, iob->data);
switch (rc) {
case 0:
@@ -563,6 +512,7 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
case -EIO:
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
+ /* fall through */
default:
goto out;
}
@@ -722,7 +672,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
void qeth_schedule_recovery(struct qeth_card *card)
{
- QETH_DBF_TEXT(TRACE, 2, "startrec");
+ QETH_CARD_TEXT(card, 2, "startrec");
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
@@ -732,15 +682,17 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
{
int dstat, cstat;
char *sense;
+ struct qeth_card *card;
sense = (char *) irb->ecw;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ card = CARD_FROM_CDEV(cdev);
if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
- QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
+ QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
@@ -753,23 +705,23 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
- QETH_DBF_TEXT(TRACE, 2, "REVIND");
+ QETH_CARD_TEXT(card, 2, "REVIND");
return 1;
}
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
- QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
+ QETH_CARD_TEXT(card, 2, "CMDREJi");
return 1;
}
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
- QETH_DBF_TEXT(TRACE, 2, "AFFE");
+ QETH_CARD_TEXT(card, 2, "AFFE");
return 1;
}
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
- QETH_DBF_TEXT(TRACE, 2, "ZEROSEN");
+ QETH_CARD_TEXT(card, 2, "ZEROSEN");
return 0;
}
- QETH_DBF_TEXT(TRACE, 2, "DGENCHK");
+ QETH_CARD_TEXT(card, 2, "DGENCHK");
return 1;
}
return 0;
@@ -778,6 +730,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
static long __qeth_check_irb_error(struct ccw_device *cdev,
unsigned long intparm, struct irb *irb)
{
+ struct qeth_card *card;
+
+ card = CARD_FROM_CDEV(cdev);
+
if (!IS_ERR(irb))
return 0;
@@ -785,17 +741,15 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
case -EIO:
QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
dev_name(&cdev->dev));
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
break;
case -ETIMEDOUT:
dev_warn(&cdev->dev, "A hardware operation timed out"
" on the device\n");
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
if (intparm == QETH_RCD_PARM) {
- struct qeth_card *card = CARD_FROM_CDEV(cdev);
-
if (card && (card->data.ccwdev == cdev)) {
card->data.state = CH_STATE_DOWN;
wake_up(&card->wait_q);
@@ -805,8 +759,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
default:
QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
dev_name(&cdev->dev), PTR_ERR(irb));
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT(TRACE, 2, " rc???");
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT(card, 2, " rc???");
}
return PTR_ERR(irb);
}
@@ -822,8 +776,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct qeth_cmd_buffer *iob;
__u8 index;
- QETH_DBF_TEXT(TRACE, 5, "irq");
-
if (__qeth_check_irb_error(cdev, intparm, irb))
return;
cstat = irb->scsw.cmd.cstat;
@@ -833,15 +785,17 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (!card)
return;
+ QETH_CARD_TEXT(card, 5, "irq");
+
if (card->read.ccwdev == cdev) {
channel = &card->read;
- QETH_DBF_TEXT(TRACE, 5, "read");
+ QETH_CARD_TEXT(card, 5, "read");
} else if (card->write.ccwdev == cdev) {
channel = &card->write;
- QETH_DBF_TEXT(TRACE, 5, "write");
+ QETH_CARD_TEXT(card, 5, "write");
} else {
channel = &card->data;
- QETH_DBF_TEXT(TRACE, 5, "data");
+ QETH_CARD_TEXT(card, 5, "data");
}
atomic_set(&channel->irq_pending, 0);
@@ -857,12 +811,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
goto out;
if (intparm == QETH_CLEAR_CHANNEL_PARM) {
- QETH_DBF_TEXT(TRACE, 6, "clrchpar");
+ QETH_CARD_TEXT(card, 6, "clrchpar");
/* we don't have to handle this further */
intparm = 0;
}
if (intparm == QETH_HALT_CHANNEL_PARM) {
- QETH_DBF_TEXT(TRACE, 6, "hltchpar");
+ QETH_CARD_TEXT(card, 6, "hltchpar");
/* we don't have to handle this further */
intparm = 0;
}
@@ -963,7 +917,7 @@ void qeth_clear_qdio_buffers(struct qeth_card *card)
{
int i, j;
- QETH_DBF_TEXT(TRACE, 2, "clearqdbf");
+ QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i)
if (card->qdio.out_qs[i]) {
@@ -978,7 +932,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
int i = 0;
- QETH_DBF_TEXT(TRACE, 5, "freepool");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.init_pool.entry_list, init_list){
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
@@ -992,7 +945,6 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
{
int i, j;
- QETH_DBF_TEXT(TRACE, 2, "freeqdbf");
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
@@ -1089,7 +1041,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
- QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x",
+ QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
(u8) card->thread_start_mask,
(u8) card->thread_allowed_mask,
(u8) card->thread_running_mask);
@@ -1102,7 +1054,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
{
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
- QETH_DBF_TEXT(TRACE , 2, "strthrd");
+ QETH_CARD_TEXT(card , 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
@@ -1229,8 +1181,8 @@ static int qeth_clear_channel(struct qeth_channel *channel)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "clearch");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 3, "clearch");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1253,8 +1205,8 @@ static int qeth_halt_channel(struct qeth_channel *channel)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "haltch");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 3, "haltch");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1274,7 +1226,7 @@ static int qeth_halt_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
- QETH_DBF_TEXT(TRACE, 3, "haltchs");
+ QETH_CARD_TEXT(card, 3, "haltchs");
rc1 = qeth_halt_channel(&card->read);
rc2 = qeth_halt_channel(&card->write);
rc3 = qeth_halt_channel(&card->data);
@@ -1289,7 +1241,7 @@ static int qeth_clear_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
- QETH_DBF_TEXT(TRACE, 3, "clearchs");
+ QETH_CARD_TEXT(card, 3, "clearchs");
rc1 = qeth_clear_channel(&card->read);
rc2 = qeth_clear_channel(&card->write);
rc3 = qeth_clear_channel(&card->data);
@@ -1304,8 +1256,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "clhacrd");
- QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 3, "clhacrd");
if (halt)
rc = qeth_halt_channels(card);
@@ -1318,7 +1269,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "qdioclr");
+ QETH_CARD_TEXT(card, 3, "qdioclr");
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
@@ -1329,7 +1280,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
- QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 3, "1err%d", rc);
qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
@@ -1340,7 +1291,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
}
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
- QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc);
+ QETH_CARD_TEXT_(card, 3, "2err%d", rc);
card->state = CARD_STATE_DOWN;
return rc;
}
@@ -1440,6 +1391,7 @@ static void qeth_init_func_level(struct qeth_card *card)
QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
break;
case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSN:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
break;
default:
@@ -1637,15 +1589,18 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
"host\n");
break;
case QETH_IDX_ACT_ERR_AUTH:
+ case QETH_IDX_ACT_ERR_AUTH_USER:
dev_err(&card->read.ccwdev->dev,
"Setting the device online failed because of "
- "insufficient LPAR authorization\n");
+ "insufficient authorization\n");
break;
default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
dev_name(&card->read.ccwdev->dev));
}
+ QETH_CARD_TEXT_(card, 2, "idxread%c",
+ QETH_IDX_ACT_CAUSE_CODE(iob->data));
goto out;
}
@@ -1705,7 +1660,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long timeout, event_timeout;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "sendctl");
+ QETH_CARD_TEXT(card, 2, "sendctl");
reply = qeth_alloc_reply(card);
if (!reply) {
@@ -1732,7 +1687,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
event_timeout = QETH_TIMEOUT;
timeout = jiffies + event_timeout;
- QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
+ QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
(addr_t) iob, 0, 0);
@@ -1741,7 +1696,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
"ccw_device_start rc = %i\n",
dev_name(&card->write.ccwdev->dev), rc);
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irqsave(&card->lock, flags);
list_del_init(&reply->list);
qeth_put_reply(reply);
@@ -1978,7 +1933,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
card->info.link_type = link_type;
} else
card->info.link_type = 0;
- QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type);
+ QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -2335,7 +2290,7 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *entry;
- QETH_DBF_TEXT(TRACE, 5, "inwrklst");
+ QETH_CARD_TEXT(card, 5, "inwrklst");
list_for_each_entry(entry,
&card->qdio.init_pool.entry_list, init_list) {
@@ -2522,7 +2477,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int rc;
char prot_type;
- QETH_DBF_TEXT(TRACE, 4, "sendipa");
+ QETH_CARD_TEXT(card, 4, "sendipa");
if (card->options.layer2)
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -2582,7 +2537,7 @@ int qeth_default_setadapterparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "defadpcb");
+ QETH_CARD_TEXT(card, 4, "defadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0)
@@ -2597,7 +2552,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
+ QETH_CARD_TEXT(card, 3, "quyadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
@@ -2633,7 +2588,7 @@ int qeth_query_setadapterparms(struct qeth_card *card)
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 3, "queryadp");
+ QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
sizeof(struct qeth_ipacmd_setadpparms));
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
@@ -2645,13 +2600,12 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
unsigned int qdio_error, const char *dbftext)
{
if (qdio_error) {
- QETH_DBF_TEXT(TRACE, 2, dbftext);
- QETH_DBF_TEXT(QERR, 2, dbftext);
- QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
+ QETH_CARD_TEXT(card, 2, dbftext);
+ QETH_CARD_TEXT_(card, 2, " F15=%02X",
buf->element[15].flags & 0xff);
- QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
+ QETH_CARD_TEXT_(card, 2, " F14=%02X",
buf->element[14].flags & 0xff);
- QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
+ QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].flags & 0xff) == 0x12) {
card->stats.rx_dropped++;
return 0;
@@ -2717,8 +2671,7 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
if (rc) {
dev_warn(&card->gdev->dev,
"QDIO reported an error, rc=%i\n", rc);
- QETH_DBF_TEXT(TRACE, 2, "qinberr");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "qinberr");
}
queue->next_buf_to_init = (queue->next_buf_to_init + count) %
QDIO_MAX_BUFFERS_PER_Q;
@@ -2731,7 +2684,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
{
int sbalf15 = buffer->buffer->element[15].flags & 0xff;
- QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
+ QETH_CARD_TEXT(card, 6, "hdsnderr");
if (card->info.type == QETH_CARD_TYPE_IQD) {
if (sbalf15 == 0) {
qdio_err = 0;
@@ -2747,9 +2700,8 @@ static int qeth_handle_send_error(struct qeth_card *card,
if ((sbalf15 >= 15) && (sbalf15 <= 31))
return QETH_SEND_ERROR_RETRY;
- QETH_DBF_TEXT(TRACE, 1, "lnkfail");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
+ QETH_CARD_TEXT(card, 1, "lnkfail");
+ QETH_CARD_TEXT_(card, 1, "%04x %02x",
(u16)qdio_err, (u8)sbalf15);
return QETH_SEND_ERROR_LINK_FAILURE;
}
@@ -2764,7 +2716,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
if (atomic_read(&queue->used_buffers)
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
- QETH_DBF_TEXT(TRACE, 6, "np->pack");
+ QETH_CARD_TEXT(queue->card, 6, "np->pack");
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_dp_p++;
queue->do_pack = 1;
@@ -2787,7 +2739,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
- QETH_DBF_TEXT(TRACE, 6, "pack->np");
+ QETH_CARD_TEXT(queue->card, 6, "pack->np");
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_p_dp++;
queue->do_pack = 0;
@@ -2896,9 +2848,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* ignore temporary SIGA errors without busy condition */
if (rc == QDIO_ERROR_SIGA_TARGET)
return;
- QETH_DBF_TEXT(TRACE, 2, "flushbuf");
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));
+ QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+ QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
/* this must not happen under normal circumstances. if it
* happens something is really wrong -> recover */
@@ -2960,10 +2911,9 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
int i;
unsigned qeth_send_err;
- QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
+ QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 2, "achkcond");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "achkcond");
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
return;
@@ -3033,13 +2983,11 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
int qeth_get_elements_no(struct qeth_card *card, void *hdr,
struct sk_buff *skb, int elems)
{
- int elements_needed = 0;
+ int dlen = skb->len - skb->data_len;
+ int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
+ PFN_DOWN((unsigned long)skb->data);
- if (skb_shinfo(skb)->nr_frags > 0)
- elements_needed = (skb_shinfo(skb)->nr_frags + 1);
- if (elements_needed == 0)
- elements_needed = 1 + (((((unsigned long) skb->data) %
- PAGE_SIZE) + skb->len) >> PAGE_SHIFT);
+ elements_needed += skb_shinfo(skb)->nr_frags;
if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
"(Number=%d / Length=%d). Discarded.\n",
@@ -3050,15 +2998,35 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);
+int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
+{
+ int hroom, inpage, rest;
+
+ if (((unsigned long)skb->data & PAGE_MASK) !=
+ (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
+ hroom = skb_headroom(skb);
+ inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
+ rest = len - inpage;
+ if (rest > hroom)
+ return 1;
+ memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
+ skb->data -= rest;
+ QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
+
static inline void __qeth_fill_buffer(struct sk_buff *skb,
struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
int offset)
{
- int length = skb->len;
+ int length = skb->len - skb->data_len;
int length_here;
int element;
char *data;
- int first_lap ;
+ int first_lap, cnt;
+ struct skb_frag_struct *frag;
element = *next_element_to_fill;
data = skb->data;
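
qeth_hdr_chk_and_bounce(), added in the hunk above, keeps the transport header from straddling a page boundary by pulling the payload back into the skb headroom before the buffer is handed to the hardware. Below is a hedged userspace sketch of the same check-and-memmove idea; bounce_header() and the aligned_alloc() buffer setup are made up for illustration and only mirror the logic, not the skb internals.

/* Userspace sketch of the bounce logic, assuming 4 KiB pages: if the
 * first hdr_len bytes cross a page boundary, shift the payload back
 * into the headroom so the header ends up within a single page. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int bounce_header(unsigned char **data, size_t datalen,
			 size_t headroom, size_t hdr_len)
{
	uintptr_t start = (uintptr_t)*data;
	size_t inpage, rest;

	if ((start & PAGE_MASK) == ((start + hdr_len - 1) & PAGE_MASK))
		return 0;			/* header fits in one page */

	inpage = PAGE_SIZE - (start % PAGE_SIZE);
	rest = hdr_len - inpage;		/* bytes spilling over */
	if (rest > headroom)
		return 1;			/* cannot bounce, drop */

	memmove(*data - rest, *data, datalen);	/* pull data back */
	*data -= rest;
	return 0;
}

int main(void)
{
	unsigned char *buf = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);
	unsigned char *data;

	if (!buf)
		return 1;
	data = buf + PAGE_SIZE - 8;		/* only 8 bytes left in page */
	memset(data, 0xab, 64);
	printf("bounce: %s\n",
	       bounce_header(&data, 64, PAGE_SIZE - 8, 32) ? "drop" : "ok");
	free(buf);
	return 0;
}
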
@@ -3081,10 +3049,14 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
length -= length_here;
if (!length) {
if (first_lap)
- buffer->element[element].flags = 0;
+ if (skb_shinfo(skb)->nr_frags)
+ buffer->element[element].flags =
+ SBAL_FLAGS_FIRST_FRAG;
+ else
+ buffer->element[element].flags = 0;
else
buffer->element[element].flags =
- SBAL_FLAGS_LAST_FRAG;
+ SBAL_FLAGS_MIDDLE_FRAG;
} else {
if (first_lap)
buffer->element[element].flags =
@@ -3097,6 +3069,18 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
element++;
first_lap = 0;
}
+
+ for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+ frag = &skb_shinfo(skb)->frags[cnt];
+ buffer->element[element].addr = (char *)page_to_phys(frag->page)
+ + frag->page_offset;
+ buffer->element[element].length = frag->size;
+ buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG;
+ element++;
+ }
+
+ if (buffer->element[element - 1].flags)
+ buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG;
*next_element_to_fill = element;
}
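
With __qeth_fill_buffer() now appending the page fragments itself, each skb turns into a chain of buffer elements whose flags mark the first, the middle and the last piece. The standalone sketch below only illustrates that flag layout; struct element, fill_elements() and the flag names are invented, and the linear part is not split across pages the way the real code does it.

/* Userspace sketch of the first/middle/last element chain built by the
 * rewritten fill routine (simplified: the linear part is one element). */
#include <stdio.h>

enum { FLAG_NONE, FLAG_FIRST, FLAG_MIDDLE, FLAG_LAST };

struct element {
	int len;
	int flags;
};

static int fill_elements(int linear_len, const int *frag_len, int nr_frags,
			 struct element *el)
{
	int i, n = 0;

	el[n].len = linear_len;			/* linear skb data */
	el[n].flags = nr_frags ? FLAG_FIRST : FLAG_NONE;
	n++;

	for (i = 0; i < nr_frags; i++) {	/* one element per fragment */
		el[n].len = frag_len[i];
		el[n].flags = FLAG_MIDDLE;
		n++;
	}
	if (el[n - 1].flags != FLAG_NONE)	/* close the chain */
		el[n - 1].flags = FLAG_LAST;
	return n;
}

int main(void)
{
	struct element el[8];
	int frags[2] = { 1500, 800 };
	int i, n = fill_elements(64, frags, 2, el);

	for (i = 0; i < n; i++)
		printf("element %d: len %d flags %d\n", i, el[i].len, el[i].flags);
	return 0;
}
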
@@ -3137,20 +3121,16 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
buf->next_element_to_fill++;
}
- if (skb_shinfo(skb)->nr_frags == 0)
- __qeth_fill_buffer(skb, buffer, large_send,
- (int *)&buf->next_element_to_fill, offset);
- else
- __qeth_fill_buffer_frag(skb, buffer, large_send,
- (int *)&buf->next_element_to_fill);
+ __qeth_fill_buffer(skb, buffer, large_send,
+ (int *)&buf->next_element_to_fill, offset);
if (!queue->do_pack) {
- QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
+ QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
/* set state to PRIMED -> will be flushed */
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
flush_cnt = 1;
} else {
- QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
+ QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
if (queue->card->options.performance_stats)
queue->card->perf_stats.skbs_sent_pack++;
if (buf->next_element_to_fill >=
@@ -3210,7 +3190,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
rc = dev_queue_xmit(skb);
} else {
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(QERR, 2, "qrdrop");
+ QETH_CARD_TEXT(card, 2, "qrdrop");
}
}
return 0;
@@ -3312,14 +3292,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_ipacmd_setadpparms *setparms;
- QETH_DBF_TEXT(TRACE, 4, "prmadpcb");
+ QETH_CARD_TEXT(card, 4, "prmadpcb");
cmd = (struct qeth_ipa_cmd *) data;
setparms = &(cmd->data.setadapterparms);
qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
card->info.promisc_mode = setparms->data.mode;
@@ -3333,7 +3313,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "setprom");
+ QETH_CARD_TEXT(card, 4, "setprom");
if (((dev->flags & IFF_PROMISC) &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
@@ -3343,7 +3323,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
mode = SET_PROMISC_MODE_OFF;
if (dev->flags & IFF_PROMISC)
mode = SET_PROMISC_MODE_ON;
- QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode);
+ QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -3360,9 +3340,9 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu)
card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "chgmtu");
+ QETH_CARD_TEXT(card, 4, "chgmtu");
sprintf(dbf_text, "%8x", new_mtu);
- QETH_DBF_TEXT(TRACE, 4, dbf_text);
+ QETH_CARD_TEXT(card, 4, dbf_text);
if (new_mtu < 64)
return -EINVAL;
@@ -3382,7 +3362,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev)
card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 5, "getstat");
+ QETH_CARD_TEXT(card, 5, "getstat");
return &card->stats;
}
@@ -3393,7 +3373,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "chgmaccb");
+ QETH_CARD_TEXT(card, 4, "chgmaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 ||
@@ -3413,7 +3393,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "chgmac");
+ QETH_CARD_TEXT(card, 4, "chgmac");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -3435,7 +3415,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
struct qeth_set_access_ctrl *access_ctrl_req;
int rc;
- QETH_DBF_TEXT(TRACE, 4, "setaccb");
+ QETH_CARD_TEXT(card, 4, "setaccb");
cmd = (struct qeth_ipa_cmd *) data;
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
@@ -3533,7 +3513,7 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_set_access_ctrl *access_ctrl_req;
- QETH_DBF_TEXT(TRACE, 4, "setacctl");
+ QETH_CARD_TEXT(card, 4, "setacctl");
QETH_DBF_TEXT_(SETUP, 2, "setacctl");
QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -3555,7 +3535,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "setactlo");
+ QETH_CARD_TEXT(card, 4, "setactlo");
if ((card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) &&
@@ -3583,8 +3563,8 @@ void qeth_tx_timeout(struct net_device *dev)
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 4, "txtimeo");
card = dev->ml_priv;
+ QETH_CARD_TEXT(card, 4, "txtimeo");
card->stats.tx_errors++;
qeth_schedule_recovery(card);
}
@@ -3663,7 +3643,7 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
{
u16 s1, s2;
- QETH_DBF_TEXT(TRACE, 4, "sendsnmp");
+ QETH_CARD_TEXT(card, 4, "sendsnmp");
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -3688,7 +3668,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
unsigned char *data;
__u16 data_len;
- QETH_DBF_TEXT(TRACE, 3, "snpcmdcb");
+ QETH_CARD_TEXT(card, 3, "snpcmdcb");
cmd = (struct qeth_ipa_cmd *) sdata;
data = (unsigned char *)((char *)cmd - reply->offset);
@@ -3696,13 +3676,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
snmp = &cmd->data.setadapterparms.data.snmp;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
return 0;
}
if (cmd->data.setadapterparms.hdr.return_code) {
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
- QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
return 0;
}
data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
@@ -3713,13 +3693,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
- QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM);
+ QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
cmd->hdr.return_code = -ENOMEM;
return 0;
}
- QETH_DBF_TEXT_(TRACE, 4, "snore%i",
+ QETH_CARD_TEXT_(card, 4, "snore%i",
cmd->data.setadapterparms.hdr.used_total);
- QETH_DBF_TEXT_(TRACE, 4, "sseqn%i",
+ QETH_CARD_TEXT_(card, 4, "sseqn%i",
cmd->data.setadapterparms.hdr.seq_no);
/*copy entries to user buffer*/
if (cmd->data.setadapterparms.hdr.seq_no == 1) {
@@ -3733,9 +3713,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
}
qinfo->udata_offset += data_len;
/* check if all replies received ... */
- QETH_DBF_TEXT_(TRACE, 4, "srtot%i",
+ QETH_CARD_TEXT_(card, 4, "srtot%i",
cmd->data.setadapterparms.hdr.used_total);
- QETH_DBF_TEXT_(TRACE, 4, "srseq%i",
+ QETH_CARD_TEXT_(card, 4, "srseq%i",
cmd->data.setadapterparms.hdr.seq_no);
if (cmd->data.setadapterparms.hdr.seq_no <
cmd->data.setadapterparms.hdr.used_total)
@@ -3752,7 +3732,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "snmpcmd");
+ QETH_CARD_TEXT(card, 3, "snmpcmd");
if (card->info.guestlan)
return -EOPNOTSUPP;
@@ -3766,7 +3746,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
return -EFAULT;
ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
if (!ureq) {
- QETH_DBF_TEXT(TRACE, 2, "snmpnome");
+ QETH_CARD_TEXT(card, 2, "snmpnome");
return -ENOMEM;
}
if (copy_from_user(ureq, udata,
@@ -4120,13 +4100,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb_len -= data_len;
if (skb_len) {
if (qeth_is_last_sbale(element)) {
- QETH_DBF_TEXT(TRACE, 4, "unexeob");
- QETH_DBF_TEXT_(TRACE, 4, "%s",
- CARD_BUS_ID(card));
- QETH_DBF_TEXT(QERR, 2, "unexeob");
- QETH_DBF_TEXT_(QERR, 2, "%s",
- CARD_BUS_ID(card));
- QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer));
+ QETH_CARD_TEXT(card, 4, "unexeob");
+ QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
dev_kfree_skb_any(skb);
card->stats.rx_errors++;
return NULL;
@@ -4147,8 +4122,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
return skb;
no_mem:
if (net_ratelimit()) {
- QETH_DBF_TEXT(TRACE, 2, "noskbmem");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "noskbmem");
}
card->stats.rx_dropped++;
return NULL;
@@ -4164,17 +4138,17 @@ static void qeth_unregister_dbf_views(void)
}
}
-void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...)
+void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
char dbf_txt_buf[32];
va_list args;
- if (level > (qeth_dbf[dbf_nix].id)->level)
+ if (level > id->level)
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
- debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);
+ debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
@@ -4282,6 +4256,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
struct device *dev;
int rc;
unsigned long flags;
+ char dbf_name[20];
QETH_DBF_TEXT(SETUP, 2, "probedev");
@@ -4297,6 +4272,17 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
rc = -ENOMEM;
goto err_dev;
}
+
+ snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
+ dev_name(&gdev->dev));
+ card->debug = debug_register(dbf_name, 2, 1, 8);
+ if (!card->debug) {
+ QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
+ rc = -ENOMEM;
+ goto err_card;
+ }
+ debug_register_view(card->debug, &debug_hex_ascii_view);
+
card->read.ccwdev = gdev->cdev[0];
card->write.ccwdev = gdev->cdev[1];
card->data.ccwdev = gdev->cdev[2];
@@ -4309,12 +4295,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
rc = qeth_determine_card_type(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- goto err_card;
+ goto err_dbf;
}
rc = qeth_setup_card(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- goto err_card;
+ goto err_dbf;
}
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -4322,7 +4308,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
else
rc = qeth_core_create_device_attributes(dev);
if (rc)
- goto err_card;
+ goto err_dbf;
switch (card->info.type) {
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
@@ -4352,6 +4338,8 @@ err_attr:
qeth_core_remove_osn_attributes(dev);
else
qeth_core_remove_device_attributes(dev);
+err_dbf:
+ debug_unregister(card->debug);
err_card:
qeth_core_free_card(card);
err_dev:
@@ -4375,6 +4363,7 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
} else {
qeth_core_remove_device_attributes(&gdev->dev);
}
+ debug_unregister(card->debug);
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_del(&card->list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f9ed24d..e37dd8c 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -616,8 +616,9 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
-#define QETH_IDX_ACT_ERR_EXCL 0x19
-#define QETH_IDX_ACT_ERR_AUTH 0x1E
+#define QETH_IDX_ACT_ERR_EXCL 0x19
+#define QETH_IDX_ACT_ERR_AUTH 0x1E
+#define QETH_IDX_ACT_ERR_AUTH_USER 0x20
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d43f57a..32d07c2 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -79,7 +79,7 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EOPNOTSUPP;
}
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
+ QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
return rc;
}
@@ -130,7 +130,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 *mac;
- QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb");
+ QETH_CARD_TEXT(card, 2, "L2Sgmacb");
cmd = (struct qeth_ipa_cmd *) data;
mac = &cmd->data.setdelmac.mac[0];
/* MAC already registered, needed in couple/uncouple case */
@@ -147,7 +147,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Sgmac");
+ QETH_CARD_TEXT(card, 2, "L2Sgmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
qeth_l2_send_setgroupmac_cb);
}
@@ -159,7 +159,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 *mac;
- QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb");
+ QETH_CARD_TEXT(card, 2, "L2Dgmacb");
cmd = (struct qeth_ipa_cmd *) data;
mac = &cmd->data.setdelmac.mac[0];
if (cmd->hdr.return_code)
@@ -170,7 +170,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Dgmac");
+ QETH_CARD_TEXT(card, 2, "L2Dgmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
qeth_l2_send_delgroupmac_cb);
}
@@ -262,15 +262,14 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2sdvcb");
+ QETH_CARD_TEXT(card, 2, "L2sdvcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
"Continuing\n", cmd->data.setdelvlan.vlan_id,
QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command);
- QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
+ QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
return 0;
}
@@ -281,7 +280,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd);
+ QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
@@ -292,7 +291,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
{
struct qeth_vlan_vid *id;
- QETH_DBF_TEXT(TRACE, 3, "L2prcvln");
+ QETH_CARD_TEXT(card, 3, "L2prcvln");
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
if (clear)
@@ -310,13 +309,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
- QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
- QETH_DBF_TEXT(TRACE, 3, "aidOSM");
+ QETH_CARD_TEXT(card, 3, "aidOSM");
return;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "aidREC");
+ QETH_CARD_TEXT(card, 3, "aidREC");
return;
}
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
@@ -334,13 +333,13 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
- QETH_DBF_TEXT(TRACE, 3, "kidOSM");
+ QETH_CARD_TEXT(card, 3, "kidOSM");
return;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "kidREC");
+ QETH_CARD_TEXT(card, 3, "kidREC");
return;
}
spin_lock_bh(&card->vlanlock);
@@ -456,7 +455,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
/* else unknown */
default:
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(TRACE, 3, "inbunkno");
+ QETH_CARD_TEXT(card, 3, "inbunkno");
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
continue;
}
@@ -474,7 +473,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 2, "L2sdmac");
+ QETH_CARD_TEXT(card, 2, "L2sdmac");
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
@@ -488,10 +487,10 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2Smaccb");
+ QETH_CARD_TEXT(card, 2, "L2Smaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
switch (cmd->hdr.return_code) {
case IPA_RC_L2_DUP_MAC:
@@ -523,7 +522,7 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Setmac");
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
qeth_l2_send_setmac_cb);
}
@@ -534,10 +533,10 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb");
+ QETH_CARD_TEXT(card, 2, "L2Dmaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
cmd->hdr.return_code = -EIO;
return 0;
}
@@ -548,7 +547,7 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card,
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Delmac");
+ QETH_CARD_TEXT(card, 2, "L2Delmac");
if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
return 0;
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
@@ -594,23 +593,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
struct qeth_card *card = dev->ml_priv;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "setmac");
+ QETH_CARD_TEXT(card, 3, "setmac");
if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
- QETH_DBF_TEXT(TRACE, 3, "setmcINV");
+ QETH_CARD_TEXT(card, 3, "setmcINV");
return -EOPNOTSUPP;
}
if (card->info.type == QETH_CARD_TYPE_OSN ||
card->info.type == QETH_CARD_TYPE_OSM ||
card->info.type == QETH_CARD_TYPE_OSX) {
- QETH_DBF_TEXT(TRACE, 3, "setmcTYP");
+ QETH_CARD_TEXT(card, 3, "setmcTYP");
return -EOPNOTSUPP;
}
- QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
- QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN);
+ QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "setmcREC");
+ QETH_CARD_TEXT(card, 3, "setmcREC");
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
@@ -627,7 +625,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
if (card->info.type == QETH_CARD_TYPE_OSN)
return ;
- QETH_DBF_TEXT(TRACE, 3, "setmulti");
+ QETH_CARD_TEXT(card, 3, "setmulti");
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
@@ -714,10 +712,13 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (card->info.type != QETH_CARD_TYPE_IQD) {
+ if (qeth_hdr_chk_and_bounce(new_skb,
+ sizeof(struct qeth_hdr_layer2)))
+ goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements);
- else
+ } else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements, data_offset, hd_len);
if (!rc) {
@@ -771,11 +772,10 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 1, "qdinchk");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
+ QETH_CARD_TEXT(card, 1, "qdinchk");
+ QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
count);
- QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+ QETH_CARD_TEXT_(card, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
@@ -799,13 +799,13 @@ static int qeth_l2_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethopen");
+ QETH_CARD_TEXT(card, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
if ((card->info.type != QETH_CARD_TYPE_OSN) &&
(!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
- QETH_DBF_TEXT(TRACE, 4, "nomacadr");
+ QETH_CARD_TEXT(card, 4, "nomacadr");
return -EPERM;
}
card->data.state = CH_STATE_UP;
@@ -822,7 +822,7 @@ static int qeth_l2_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethstop");
+ QETH_CARD_TEXT(card, 4, "qethstop");
netif_tx_disable(dev);
if (card->state == CARD_STATE_UP)
card->state = CARD_STATE_SOFTSETUP;
@@ -1074,11 +1074,10 @@ static int qeth_l2_recover(void *ptr)
int rc = 0;
card = (struct qeth_card *) ptr;
- QETH_DBF_TEXT(TRACE, 2, "recover1");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
- QETH_DBF_TEXT(TRACE, 2, "recover2");
+ QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
card->use_hard_stop = 1;
@@ -1181,12 +1180,12 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 5, "osndctrd");
+ QETH_CARD_TEXT(card, 5, "osndctrd");
wait_event(card->wait_q,
atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
qeth_prepare_control_data(card, len, iob);
- QETH_DBF_TEXT(TRACE, 6, "osnoirqp");
+ QETH_CARD_TEXT(card, 6, "osnoirqp");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
(addr_t) iob, 0, 0);
@@ -1194,7 +1193,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
"ccw_device_start rc = %i\n", rc);
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_release_buffer(iob->channel, iob);
atomic_set(&card->write.irq_pending, 0);
wake_up(&card->wait_q);
@@ -1207,7 +1206,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
{
u16 s1, s2;
- QETH_DBF_TEXT(TRACE, 4, "osndipa");
+ QETH_CARD_TEXT(card, 4, "osndipa");
qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
@@ -1225,12 +1224,12 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
if (!dev)
return -ENODEV;
card = dev->ml_priv;
if (!card)
return -ENODEV;
+ QETH_CARD_TEXT(card, 2, "osnsdmc");
if ((card->state != CARD_STATE_UP) &&
(card->state != CARD_STATE_SOFTSETUP))
return -ENODEV;
@@ -1247,13 +1246,13 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 2, "osnreg");
*dev = qeth_l2_netdev_by_devno(read_dev_no);
if (*dev == NULL)
return -ENODEV;
card = (*dev)->ml_priv;
if (!card)
return -ENODEV;
+ QETH_CARD_TEXT(card, 2, "osnreg");
if ((assist_cb == NULL) || (data_cb == NULL))
return -EINVAL;
card->osn_info.assist_cb = assist_cb;
@@ -1266,12 +1265,12 @@ void qeth_osn_deregister(struct net_device *dev)
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 2, "osndereg");
if (!dev)
return;
card = dev->ml_priv;
if (!card)
return;
+ QETH_CARD_TEXT(card, 2, "osndereg");
card->osn_info.assist_cb = NULL;
card->osn_info.data_cb = NULL;
return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 61adae2..61d348e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -287,7 +287,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
addr->users += add ? 1 : -1;
if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
qeth_l3_is_addr_covered_by_ipato(card, addr)) {
- QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
+ QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
list_add_tail(&addr->entry, card->ip_tbd_list);
@@ -301,13 +301,13 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "delip");
+ QETH_CARD_TEXT(card, 4, "delip");
if (addr->proto == QETH_PROT_IPV4)
- QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
+ QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
else {
- QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
}
spin_lock_irqsave(&card->ip_lock, flags);
rc = __qeth_l3_insert_ip_todo(card, addr, 0);
@@ -320,12 +320,12 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "addip");
+ QETH_CARD_TEXT(card, 4, "addip");
if (addr->proto == QETH_PROT_IPV4)
- QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
+ QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
else {
- QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
}
spin_lock_irqsave(&card->ip_lock, flags);
rc = __qeth_l3_insert_ip_todo(card, addr, 1);
@@ -353,10 +353,10 @@ static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
struct qeth_ipaddr *iptodo;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "delmc");
+ QETH_CARD_TEXT(card, 4, "delmc");
iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!iptodo) {
- QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
+ QETH_CARD_TEXT(card, 2, "dmcnomem");
return;
}
iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
@@ -457,8 +457,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
unsigned long flags;
int rc;
- QETH_DBF_TEXT(TRACE, 2, "sdiplist");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "sdiplist");
+ QETH_CARD_HEX(card, 2, &card, sizeof(void *));
if (card->options.sniffer)
return;
@@ -466,7 +466,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
tbd_list = card->ip_tbd_list;
card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
if (!card->ip_tbd_list) {
- QETH_DBF_TEXT(TRACE, 0, "silnomem");
+ QETH_CARD_TEXT(card, 0, "silnomem");
card->ip_tbd_list = tbd_list;
spin_unlock_irqrestore(&card->ip_lock, flags);
return;
@@ -517,7 +517,7 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
struct qeth_ipaddr *addr, *tmp;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "clearip");
+ QETH_CARD_TEXT(card, 4, "clearip");
if (recover && card->options.sniffer)
return;
spin_lock_irqsave(&card->ip_lock, flags);
@@ -577,7 +577,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "setdelmc");
+ QETH_CARD_TEXT(card, 4, "setdelmc");
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -615,8 +615,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 netmask[16];
- QETH_DBF_TEXT(TRACE, 4, "setdelip");
- QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags);
+ QETH_CARD_TEXT(card, 4, "setdelip");
+ QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -645,7 +645,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "setroutg");
+ QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setrtg.type = (type);
@@ -689,7 +689,7 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "setrtg4");
+ QETH_CARD_TEXT(card, 3, "setrtg4");
qeth_l3_correct_routing_type(card, &card->options.route4.type,
QETH_PROT_IPV4);
@@ -709,7 +709,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "setrtg6");
+ QETH_CARD_TEXT(card, 3, "setrtg6");
#ifdef CONFIG_QETH_IPV6
if (!qeth_is_supported(card, IPA_IPV6))
@@ -753,7 +753,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 2, "addipato");
+ QETH_CARD_TEXT(card, 2, "addipato");
spin_lock_irqsave(&card->ip_lock, flags);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
@@ -778,7 +778,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
struct qeth_ipato_entry *ipatoe, *tmp;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 2, "delipato");
+ QETH_CARD_TEXT(card, 2, "delipato");
spin_lock_irqsave(&card->ip_lock, flags);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
@@ -806,11 +806,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addvipa4");
+ QETH_CARD_TEXT(card, 2, "addvipa4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addvipa6");
+ QETH_CARD_TEXT(card, 2, "addvipa6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -841,11 +841,11 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "delvipa4");
+ QETH_CARD_TEXT(card, 2, "delvipa4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "delvipa6");
+ QETH_CARD_TEXT(card, 2, "delvipa6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -870,11 +870,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip4");
+ QETH_CARD_TEXT(card, 2, "addrxip4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip6");
+ QETH_CARD_TEXT(card, 2, "addrxip6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -905,11 +905,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip4");
+ QETH_CARD_TEXT(card, 2, "addrxip4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip6");
+ QETH_CARD_TEXT(card, 2, "addrxip6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -929,15 +929,15 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
int cnt = 3;
if (addr->proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "setaddr4");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
+ QETH_CARD_TEXT(card, 2, "setaddr4");
+ QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
} else if (addr->proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "setaddr6");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_TEXT(card, 2, "setaddr6");
+ QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
} else {
- QETH_DBF_TEXT(TRACE, 2, "setaddr?");
- QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
+ QETH_CARD_TEXT(card, 2, "setaddr?");
+ QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
}
do {
if (addr->is_multicast)
@@ -946,10 +946,10 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
addr->set_flags);
if (rc)
- QETH_DBF_TEXT(TRACE, 2, "failed");
+ QETH_CARD_TEXT(card, 2, "failed");
} while ((--cnt > 0) && rc);
if (rc) {
- QETH_DBF_TEXT(TRACE, 2, "FAILED");
+ QETH_CARD_TEXT(card, 2, "FAILED");
qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
dev_warn(&card->gdev->dev,
"Registering IP address %s failed\n", buf);
@@ -963,15 +963,15 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
int rc = 0;
if (addr->proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "deladdr4");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
+ QETH_CARD_TEXT(card, 2, "deladdr4");
+ QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
} else if (addr->proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "deladdr6");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_TEXT(card, 2, "deladdr6");
+ QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
} else {
- QETH_DBF_TEXT(TRACE, 2, "deladdr?");
- QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
+ QETH_CARD_TEXT(card, 2, "deladdr?");
+ QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
}
if (addr->is_multicast)
rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
@@ -979,7 +979,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
addr->del_flags);
if (rc)
- QETH_DBF_TEXT(TRACE, 2, "failed");
+ QETH_CARD_TEXT(card, 2, "failed");
return rc;
}
@@ -1012,7 +1012,7 @@ static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "adpmode");
+ QETH_CARD_TEXT(card, 4, "adpmode");
iob = qeth_get_adapter_cmd(card, command,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -1027,7 +1027,7 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 4, "adphstr");
+ QETH_CARD_TEXT(card, 4, "adphstr");
if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
rc = qeth_l3_send_setadp_mode(card,
@@ -1093,7 +1093,7 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "defadpcb");
+ QETH_CARD_TEXT(card, 4, "defadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0) {
@@ -1106,13 +1106,13 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
- QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
+ QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
}
if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
card->info.tx_csum_mask =
cmd->data.setassparms.data.flags_32bit;
- QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask);
+ QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
}
return 0;
@@ -1125,7 +1125,7 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "getasscm");
+ QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1147,7 +1147,7 @@ static int qeth_l3_send_setassparms(struct qeth_card *card,
int rc;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "sendassp");
+ QETH_CARD_TEXT(card, 4, "sendassp");
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (len <= sizeof(__u32))
@@ -1166,7 +1166,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "simassp6");
+ QETH_CARD_TEXT(card, 4, "simassp6");
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
0, QETH_PROT_IPV6);
rc = qeth_l3_send_setassparms(card, iob, 0, 0,
@@ -1182,7 +1182,7 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
int length = 0;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "simassp4");
+ QETH_CARD_TEXT(card, 4, "simassp4");
if (data)
length = sizeof(__u32);
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
@@ -1196,7 +1196,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "ipaarp");
+ QETH_CARD_TEXT(card, 3, "ipaarp");
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
dev_info(&card->gdev->dev,
@@ -1218,7 +1218,7 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "ipaipfrg");
+ QETH_CARD_TEXT(card, 3, "ipaipfrg");
if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
dev_info(&card->gdev->dev,
@@ -1243,7 +1243,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stsrcmac");
+ QETH_CARD_TEXT(card, 3, "stsrcmac");
if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
dev_info(&card->gdev->dev,
@@ -1265,7 +1265,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtvlan");
+ QETH_CARD_TEXT(card, 3, "strtvlan");
if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
dev_info(&card->gdev->dev,
@@ -1289,7 +1289,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stmcast");
+ QETH_CARD_TEXT(card, 3, "stmcast");
if (!qeth_is_supported(card, IPA_MULTICASTING)) {
dev_info(&card->gdev->dev,
@@ -1349,7 +1349,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "softipv6");
+ QETH_CARD_TEXT(card, 3, "softipv6");
if (card->info.type == QETH_CARD_TYPE_IQD)
goto out;
@@ -1395,7 +1395,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtipv6");
+ QETH_CARD_TEXT(card, 3, "strtipv6");
if (!qeth_is_supported(card, IPA_IPV6)) {
dev_info(&card->gdev->dev,
@@ -1412,7 +1412,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stbrdcst");
+ QETH_CARD_TEXT(card, 3, "stbrdcst");
card->info.broadcast_capable = 0;
if (!qeth_is_supported(card, IPA_FILTERING)) {
dev_info(&card->gdev->dev,
@@ -1512,7 +1512,7 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtcsum");
+ QETH_CARD_TEXT(card, 3, "strtcsum");
if (card->options.checksum_type == NO_CHECKSUMMING) {
dev_info(&card->gdev->dev,
@@ -1569,7 +1569,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "sttso");
+ QETH_CARD_TEXT(card, 3, "sttso");
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
dev_info(&card->gdev->dev,
@@ -1596,7 +1596,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
static int qeth_l3_start_ipassists(struct qeth_card *card)
{
- QETH_DBF_TEXT(TRACE, 3, "strtipas");
+ QETH_CARD_TEXT(card, 3, "strtipas");
qeth_set_access_ctrl_online(card); /* go on*/
qeth_l3_start_ipa_arp_processing(card); /* go on*/
@@ -1619,7 +1619,7 @@ static int qeth_l3_put_unique_id(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "puniqeid");
+ QETH_CARD_TEXT(card, 2, "puniqeid");
if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
UNIQUE_ID_NOT_BY_CARD)
@@ -1723,7 +1723,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
cmd = (struct qeth_ipa_cmd *)data;
rc = cmd->hdr.return_code;
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
+ QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
switch (cmd->data.diagass.action) {
case QETH_DIAGS_CMD_TRACE_QUERY:
break;
@@ -1800,7 +1800,7 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
struct ip_mc_list *im4;
char buf[MAX_ADDR_LEN];
- QETH_DBF_TEXT(TRACE, 4, "addmc");
+ QETH_CARD_TEXT(card, 4, "addmc");
for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1820,7 +1820,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
struct vlan_group *vg;
int i;
- QETH_DBF_TEXT(TRACE, 4, "addmcvl");
+ QETH_CARD_TEXT(card, 4, "addmcvl");
if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
return;
@@ -1844,7 +1844,7 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
{
struct in_device *in4_dev;
- QETH_DBF_TEXT(TRACE, 4, "chkmcv4");
+ QETH_CARD_TEXT(card, 4, "chkmcv4");
in4_dev = in_dev_get(card->dev);
if (in4_dev == NULL)
return;
@@ -1862,7 +1862,7 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
struct ifmcaddr6 *im6;
char buf[MAX_ADDR_LEN];
- QETH_DBF_TEXT(TRACE, 4, "addmc6");
+ QETH_CARD_TEXT(card, 4, "addmc6");
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
@@ -1883,7 +1883,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
struct vlan_group *vg;
int i;
- QETH_DBF_TEXT(TRACE, 4, "admc6vl");
+ QETH_CARD_TEXT(card, 4, "admc6vl");
if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
return;
@@ -1907,7 +1907,7 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
{
struct inet6_dev *in6_dev;
- QETH_DBF_TEXT(TRACE, 4, "chkmcv6");
+ QETH_CARD_TEXT(card, 4, "chkmcv6");
if (!qeth_is_supported(card, IPA_IPV6))
return ;
in6_dev = in6_dev_get(card->dev);
@@ -1928,7 +1928,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
struct in_ifaddr *ifa;
struct qeth_ipaddr *addr;
- QETH_DBF_TEXT(TRACE, 4, "frvaddr4");
+ QETH_CARD_TEXT(card, 4, "frvaddr4");
in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in_dev)
@@ -1954,7 +1954,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
struct inet6_ifaddr *ifa;
struct qeth_ipaddr *addr;
- QETH_DBF_TEXT(TRACE, 4, "frvaddr6");
+ QETH_CARD_TEXT(card, 4, "frvaddr6");
in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
@@ -1989,7 +1989,7 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev,
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "vlanreg");
+ QETH_CARD_TEXT(card, 4, "vlanreg");
spin_lock_irqsave(&card->vlanlock, flags);
card->vlangrp = grp;
spin_unlock_irqrestore(&card->vlanlock, flags);
@@ -2005,9 +2005,9 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
- QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "kidREC");
+ QETH_CARD_TEXT(card, 3, "kidREC");
return;
}
spin_lock_irqsave(&card->vlanlock, flags);
@@ -2162,7 +2162,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
break;
default:
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(TRACE, 3, "inbunkno");
+ QETH_CARD_TEXT(card, 3, "inbunkno");
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
continue;
}
@@ -2229,7 +2229,8 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
card = vlan_dev_real_dev(dev)->ml_priv;
if (card && card->options.layer2)
card = NULL;
- QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
+ if (card)
+ QETH_CARD_TEXT_(card, 4, "%d", rc);
return card ;
}
@@ -2307,10 +2308,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
} else if (card->options.sniffer && /* HiperSockets trace */
qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
if (dev->flags & IFF_PROMISC) {
- QETH_DBF_TEXT(TRACE, 3, "+promisc");
+ QETH_CARD_TEXT(card, 3, "+promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
} else {
- QETH_DBF_TEXT(TRACE, 3, "-promisc");
+ QETH_CARD_TEXT(card, 3, "-promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
}
}
@@ -2320,7 +2321,7 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 3, "setmulti");
+ QETH_CARD_TEXT(card, 3, "setmulti");
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
@@ -2365,7 +2366,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpstnoe");
+ QETH_CARD_TEXT(card, 3, "arpstnoe");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2417,17 +2418,17 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
int uentry_size;
int i;
- QETH_DBF_TEXT(TRACE, 4, "arpquecb");
+ QETH_CARD_TEXT(card, 4, "arpquecb");
qinfo = (struct qeth_arp_query_info *) reply->param;
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code);
return 0;
}
if (cmd->data.setassparms.hdr.return_code) {
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
- QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code);
return 0;
}
qdata = &cmd->data.setassparms.data.query_arp;
@@ -2449,14 +2450,14 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) <
qdata->no_entries * uentry_size){
- QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM);
+ QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
cmd->hdr.return_code = -ENOMEM;
goto out_error;
}
- QETH_DBF_TEXT_(TRACE, 4, "anore%i",
+ QETH_CARD_TEXT_(card, 4, "anore%i",
cmd->data.setassparms.hdr.number_of_replies);
- QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
- QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries);
+ QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
+ QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
/* strip off "media specific information" */
@@ -2492,7 +2493,7 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
unsigned long),
void *reply_param)
{
- QETH_DBF_TEXT(TRACE, 4, "sendarp");
+ QETH_CARD_TEXT(card, 4, "sendarp");
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -2508,7 +2509,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpquery");
+ QETH_CARD_TEXT(card, 3, "arpquery");
if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
IPA_ARP_PROCESSING)) {
@@ -2551,7 +2552,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpadent");
+ QETH_CARD_TEXT(card, 3, "arpadent");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2590,7 +2591,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arprment");
+ QETH_CARD_TEXT(card, 3, "arprment");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2626,7 +2627,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
int rc;
int tmp;
- QETH_DBF_TEXT(TRACE, 3, "arpflush");
+ QETH_CARD_TEXT(card, 3, "arpflush");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2734,7 +2735,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EOPNOTSUPP;
}
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
+ QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
return rc;
}
@@ -2903,19 +2904,11 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb)
unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
tcp_hdr(skb)->doff * 4;
int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
- int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
+ int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
elements += skb_shinfo(skb)->nr_frags;
return elements;
}
-static inline int qeth_l3_tso_check(struct sk_buff *skb)
-{
- int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
- (unsigned long)skb->data;
- return (((unsigned long)skb->data & PAGE_MASK) !=
- (((unsigned long)skb->data + len) & PAGE_MASK));
-}
-
static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int rc;
@@ -3015,8 +3008,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
(cast_type == RTN_UNSPEC)) {
hdr = (struct qeth_hdr *)skb_push(new_skb,
sizeof(struct qeth_hdr_tso));
- if (qeth_l3_tso_check(new_skb))
- QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
qeth_tso_fill_header(card, hdr, new_skb);
@@ -3047,10 +3038,20 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
elements_needed += elems;
nr_frags = skb_shinfo(new_skb)->nr_frags;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (card->info.type != QETH_CARD_TYPE_IQD) {
+ int len;
+ if (large_send == QETH_LARGE_SEND_TSO)
+ len = ((unsigned long)tcp_hdr(new_skb) +
+ tcp_hdr(new_skb)->doff * 4) -
+ (unsigned long)new_skb->data;
+ else
+ len = sizeof(struct qeth_hdr_layer3);
+
+ if (qeth_hdr_chk_and_bounce(new_skb, len))
+ goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements_needed);
- else
+ } else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements_needed, data_offset, 0);
@@ -3103,7 +3104,7 @@ static int qeth_l3_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethopen");
+ QETH_CARD_TEXT(card, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
card->data.state = CH_STATE_UP;
@@ -3119,7 +3120,7 @@ static int qeth_l3_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethstop");
+ QETH_CARD_TEXT(card, 4, "qethstop");
netif_tx_disable(dev);
if (card->state == CARD_STATE_UP)
card->state = CARD_STATE_SOFTSETUP;
@@ -3312,11 +3313,10 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 1, "qdinchk");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
+ QETH_CARD_TEXT(card, 1, "qdinchk");
+ QETH_CARD_TEXT_(card, 1, "%04X%04X",
first_element, count);
- QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+ QETH_CARD_TEXT_(card, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
@@ -3522,11 +3522,11 @@ static int qeth_l3_recover(void *ptr)
int rc = 0;
card = (struct qeth_card *) ptr;
- QETH_DBF_TEXT(TRACE, 2, "recover1");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "recover1");
+ QETH_CARD_HEX(card, 2, &card, sizeof(void *));
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
- QETH_DBF_TEXT(TRACE, 2, "recover2");
+ QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
card->use_hard_stop = 1;
@@ -3624,8 +3624,8 @@ static int qeth_l3_ip_event(struct notifier_block *this,
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
- QETH_DBF_TEXT(TRACE, 3, "ipevent");
card = qeth_l3_get_card_from_dev(dev);
+ QETH_CARD_TEXT(card, 3, "ipevent");
if (!card)
return NOTIFY_DONE;
@@ -3671,11 +3671,11 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
struct qeth_ipaddr *addr;
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 3, "ip6event");
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
+ QETH_CARD_TEXT(card, 3, "ip6event");
if (!qeth_is_supported(card, IPA_IPV6))
return NOTIFY_DONE;
@@ -3714,7 +3714,7 @@ static int qeth_l3_register_notifiers(void)
{
int rc;
- QETH_DBF_TEXT(TRACE, 5, "regnotif");
+ QETH_DBF_TEXT(SETUP, 5, "regnotif");
rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
if (rc)
return rc;
@@ -3733,7 +3733,7 @@ static int qeth_l3_register_notifiers(void)
static void qeth_l3_unregister_notifiers(void)
{
- QETH_DBF_TEXT(TRACE, 5, "unregnot");
+ QETH_DBF_TEXT(SETUP, 5, "unregnot");
BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
#ifdef CONFIG_QETH_IPV6
BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 7049127..65e1cf1 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -47,6 +47,7 @@ static struct device *smsg_dev;
static DEFINE_SPINLOCK(smsg_list_lock);
static LIST_HEAD(smsg_list);
+static int iucv_path_connected;
static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
@@ -142,8 +143,10 @@ static int smsg_pm_freeze(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "smsg_pm_freeze\n");
#endif
- if (smsg_path)
+ if (smsg_path && iucv_path_connected) {
iucv_path_sever(smsg_path, NULL);
+ iucv_path_connected = 0;
+ }
return 0;
}
@@ -154,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "smsg_pm_restore_thaw\n");
#endif
- if (smsg_path) {
+ if (smsg_path && iucv_path_connected) {
memset(smsg_path, 0, sizeof(*smsg_path));
smsg_path->msglim = 255;
smsg_path->flags = 0;
@@ -165,6 +168,8 @@ static int smsg_pm_restore_thaw(struct device *dev)
printk(KERN_ERR
"iucv_path_connect returned with rc %i\n", rc);
#endif
+ if (!rc)
+ iucv_path_connected = 1;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
}
return 0;
@@ -214,6 +219,8 @@ static int __init smsg_init(void)
NULL, NULL, NULL);
if (rc)
goto out_free_path;
+ else
+ iucv_path_connected = 1;
smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!smsg_dev) {
rc = -ENOMEM;
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index dadd686..8bb715a 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -807,6 +807,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 7a582e8..5ede9c2 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -71,7 +71,7 @@ static int is_valid_iface(struct net_device *net_dev)
#endif
/* Device is being bridged */
- /* if (net_dev->br_port != NULL)
+ /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
return 0; */
return 1;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 5c0d06c..fb69b01 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -171,7 +171,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
int i, count;
rndis_query_cmplt_type *resp;
struct net_device *net;
- const struct net_device_stats *stats;
+ const struct rtnl_link_stats64 *stats;
if (!r) return -ENOMEM;
resp = (rndis_query_cmplt_type *) r->buf;
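The rndis gadget hunk above follows the conversion of the device statistics path to 64-bit counters (struct rtnl_link_stats64). As a rough illustration, a driver can expose such counters through the ndo_get_stats64 hook; the prototype of that hook has changed across kernel versions, so the signature assumed below matches approximately this era, and every other name is invented for the example.

/* Sketch: exposing 64-bit device statistics via ndo_get_stats64.
 * Signature assumed for the kernel generation this series targets;
 * the private structure and function names are illustrative only.
 */
#include <linux/netdevice.h>

struct example_priv {
	u64 rx_packets;
	u64 tx_packets;
};

static struct rtnl_link_stats64 *example_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *stats)
{
	struct example_priv *p = netdev_priv(dev);

	/* copy the driver's 64-bit counters into the caller's buffer */
	stats->rx_packets = p->rx_packets;
	stats->tx_packets = p->tx_packets;
	return stats;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_stats64	= example_get_stats64,
};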
diff --git a/firmware/Makefile b/firmware/Makefile
index 243409f..6e0dd3e 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -42,7 +42,7 @@ fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin
fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
cxgb3/t3c_psram-1.1.0.bin \
- cxgb3/t3fw-7.4.0.bin \
+ cxgb3/t3fw-7.10.0.bin \
cxgb3/ael2005_opt_edc.bin \
cxgb3/ael2005_twx_edc.bin \
cxgb3/ael2020_twx_edc.bin
diff --git a/firmware/cxgb3/t3fw-7.10.0.bin.ihex b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
new file mode 100644
index 0000000..96399d8
--- /dev/null
+++ b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
@@ -0,0 +1,1935 @@
+:1000000060007400200380002003700000001000D6
+:1000100000002000E100028400070000E1000288E7
+:1000200000010000E0000000E00000A0010000006E
+:1000300044444440E3000183200200002001E0002A
+:100040002001FF101FFFD0001FFFC000E300043C91
+:100050000200000020006C841FFFC2A020006CCCB6
+:100060001FFFC2A420006D0C1FFFC2A820006D80DE
+:100070001FFFC2AC200003C0C00000E43100EA3121
+:1000800000A13100A03103020002ED306E2A05000C
+:10009000ED3100020002160012FFDBC03014FFDA5F
+:1000A000D30FD30FD30F03431F244C107249F0D347
+:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
+:1000C000D30FD30F03431F244C107249F0D30FD327
+:1000D0000FD30F14FFCE03421F14FFCB03421F1296
+:1000E000FFCCC0302D37302D37342D37382D373CED
+:1000F000233D017233ED00020012FFC4C0302F37E0
+:10010000002F37102F37202F3730233D017233ED6A
+:1001100000020012FFBEC0302737002737102737F4
+:1001200020273730233D017233ED03020012FFB95F
+:1001300013FFBA0C0200932012FFB913FFB90C028F
+:1001400000932012FFB8C0319320822012FFB71312
+:10015000FFB7932012FFB715FFB316FFB6C030D715
+:100160002005660160001B00000000000000000088
+:10017000043605000200D30FD30F05330C6E3B1479
+:100180000747140704437631E604360505330C6F40
+:100190003BED00020012FFA615FFA3230A00D720A3
+:1001A000070443043E0505330C0747146F3BF00377
+:1001B000020012FFA1C03014FFA1D30FD30FD30F41
+:1001C0009340B4447249F2D30FD30FD30F14FF9B63
+:1001D000834014FF9B834012FF9B230A0014FF9A65
+:1001E000D30FD30FD30F9340B4447249F2D30FD33C
+:1001F0000FD30F14FF95834012FF95C92F832084DE
+:10020000218522BC22743B0F8650B4559630B433FE
+:100210007433F463FFE60000653FE1655FDE12FFC3
+:100220007C230A0028374028374428374828374C91
+:10023000233D017233ED03020000020012FF7AC079
+:1002400032032E0503020012FF7813FF819320C0B2
+:1002500011014931004831010200C00014FF7E0441
+:10026000D23115FF7D945014FF7D04D33115FF7CEE
+:10027000945014FF7C04D43115FF7C24560014FFE5
+:100280007B04D53115FF7B24560010FF7A03000054
+:10029000000000000000000000000000000000005E
+:1002A000000000000000000000000000000000004E
+:1002B000000000000000000000000000000000003E
+:1002C000000000000000000000000000000000002E
+:1002D000000000000000000000000000000000001E
+:1002E000000000000000000000000000000000000E
+:1002F00000000000000000000000000000000000FE
+:1003000000000000000000000000000000000000ED
+:1003100000000000000000000000000000000000DD
+:1003200000000000000000000000000000000000CD
+:1003300000000000000000000000000000000000BD
+:1003400000000000000000000000000000000000AD
+:10035000000000000000000000000000000000009D
+:10036000000000000000000000000000000000008D
+:10037000000000000000000000000000000000007D
+:10038000000000000000000000000000000000006D
+:10039000000000000000000000000000000000005D
+:1003A000000000000000000000000000000000004D
+:1003B000000000000000000000000000000000003D
+:1003C000000000000000000000000000000000002D
+:1003D000000000000000000000000000000000001D
+:1003E000000000000000000000000000000000000D
+:1003F00000000000000000000000000000000000FD
+:1004000000000000000000000000000000000000EC
+:1004100000000000000000000000000000000000DC
+:1004200063FFFC000000000000000000000000006E
+:100430000000000000000000000000001FFC0000A1
+:100440001FFC0000E30005C81FFC00001FFC0000AB
+:10045000E30005C81FFC00001FFC0000E30005C806
+:100460001FFFC0001FFFC000E30005C81FFFC00042
+:100470001FFFC018E30005C81FFFC0181FFFC018EA
+:10048000E30005E01FFFC0181FFFC294E30005E072
+:100490001FFFC2941FFFC294E300085C1FFFC2A0AD
+:1004A0001FFFC59CE300085C200000002000016ADB
+:1004B000E3000B582000018020000180E3000CC401
+:1004C0002000020020000203E3000CC42000021CF4
+:1004D00020000220E3000CC8200002202000022699
+:1004E000E3000CCC2000023C20000240E3000CD4CE
+:1004F0002000024020000249E3000CD82000024CFA
+:1005000020000250E3000CE42000025020000259B9
+:10051000E3000CE82000025C20000260E3000CF421
+:100520002000026020000269E3000CF82000026C49
+:1005300020000270E3000D04200002702000027908
+:10054000E3000D082000028C2000028CE3000D1453
+:100550002000029020000293E3000D14200002AC62
+:10056000200002B0E3000D18200002D0200002F2AB
+:10057000E3000D1C200003B0200003B0E3000D4099
+:10058000200003B0200003B0E3000D40200003B0C2
+:10059000200003B0E3000D40200003B0200003B0B2
+:1005A000E3000D40200003B020006EA4E3000D40E6
+:1005B00020006EA420006EA4E30078340000000048
+:1005C00000000000000000001FFC00001FFC0000F5
+:1005D0001FFFC5A01FFFC69020006EA820006EA8B8
+:1005E000DEFFFE000000080CDEADBEEF1FFFC2B054
+:1005F0001FFCFE001FFFC0A41FFFC5D0300000007D
+:10060000003FFFFF8040000010000000080FFFFFC8
+:100610001FFFC27D000FFFFF804FFFFF8000000023
+:1006200000000880B000000560500000600000007D
+:1006300040000011350000004100000010000001E2
+:100640002000000000001000400000000500000035
+:10065000800000190400000000000800E100020012
+:1006600010000005806000007000000020000009FC
+:10067000001FF8008000001EA0000000F80000002D
+:1006800007FFFFFF080000001800000001008001C4
+:10069000420000001FFFC22D1FFFC0EC00010080C0
+:1006A000604000001A0000000C0000001000000A6A
+:1006B000000030000001000080000018FC00000075
+:1006C0008000000100004000600008008000001C65
+:1006D0008000001A030000008000040004030403EB
+:1006E00050000003FFFFBFFF1FFFC3E400000FFF28
+:1006F000FFFFF000000016D00000FFF7A50000008B
+:100700001FFFC4C01FFFC4710001000800000B20C0
+:10071000202FFF801FFFC46500002C00FFFEFFF8A4
+:1007200000FFFFFF1FFFC58800002000FFFFDFFF65
+:100730000000FFEF010011001FFFC3E21FFFC5A073
+:10074000FFFFEFFF0000FFFB1FFFC6501FFFBEB003
+:10075000FFFFF7FF1FFFC0740000FFFD1FFFC64033
+:100760000001FBD01FFFC5C01FFFC6801FFFC5A132
+:10077000E0FFFE001FFFC5B0000080001FFFC54C5A
+:100780001FFFC5C41FFFC0781FFFC4E41FFCFFD8B4
+:10079000000100817FFFFFFFE1000600000027103D
+:1007A0001FFCFE301FFCFE701FFFC5481FFFC56009
+:1007B0000003D0901FFFC5742B5063802B507980AD
+:1007C0002B5090802B50A6801FFFC4790100110F81
+:1007D000202FFE0020300080202FFF000000FFFFB0
+:1007E0000001FFF82B50B2002B50B208000100109E
+:1007F0002B50B1802B50B2802B50BA000001001159
+:100800002B50BD282B50BC802B50BDA020300000A9
+:10081000DFFFFE005000000200C0000002000000E8
+:10082000FFFFF7F41FFFC07C000FF800044000003A
+:10083000001000000C4000001C400000E00000A080
+:100840001FFFC5501FFD00081FFFC5641FFFC578AF
+:100850001FFFC58CE1000690E10006EC00000000DF
+:100860000000000000000000000000000100000087
+:100870000000000000000000000000002010004008
+:10088000201000402010004020140080200C0000A8
+:10089000200C0000200C00002010004020140080DC
+:1008A0002014008020140080201800C0201C0100AB
+:1008B000201C0100201C010020200140201800C045
+:1008C000201800C0201800C0201C0100201800C003
+:1008D000201800C0201800C0201C0100202001406A
+:1008E00020200140202001402020094020200940F4
+:1008F000202009402020094020240980FFFFFFFF1D
+:10090000FFFFFFFFFFFFFFFF0000000000000000EF
+:1009100000000000000000000000000020005588DA
+:1009200020005458200055882000558820005394FA
+:100930002000539420005394200051D4200051D41F
+:10094000200051CC2000513820004FE020004DC045
+:1009500020004B94000000000000000020005558CB
+:1009600020005424200054C8200054C82000527C89
+:100970002000527C2000527C2000527C2000527CBF
+:10098000200051C42000527C20004F0020004D70F8
+:1009900020004B40000000000000000020000BF091
+:1009A00020003ADC200004C02000473020000BE883
+:1009B000200041F4200003F0200046F020004B1CF2
+:1009C00020003F0020003E1C20003A58200038E85C
+:1009D00020003658200031B820003C7820002DD06F
+:1009E0002000286420006828200023F0200020D068
+:1009F0002000207C20001D68200018602000158841
+:100A000020000E5420000C3420001134200013204C
+:100A1000200043EC20003EB420000BF8200004C06E
+:100A200000000000000000000000000000000000C6
+:100A300000000000000000000000000000000000B6
+:100A400000000000000000000000000000000000A6
+:100A50000000000000000000000000000000000096
+:100A60000000000000000000000000000000000086
+:100A70000000000000000000000000000000000076
+:100A80000000000000000000000000000000000066
+:100A90000000000000000000000000000000000056
+:100AA0003264000000000000326400006400640052
+:100AB00064006400640064006400640000000000DE
+:100AC0000000000000000000000000000000000026
+:100AD0000000000000000000000000000000000016
+:100AE0000000000000000000000000000000000006
+:100AF00000000000000000000000000000000000F6
+:100B000000000000000010000000000000000000D5
+:100B100000000000000000000000000000001000C5
+:100B200000000000000000000000000000000000C5
+:100B300000432380000000000000000000000000CF
+:100B400000000000000000000000000000000000A5
+:100B50000000000000000000005C94015D94025E53
+:100B600094035F94004300000000000000000000B8
+:100B70000000000000000000000000000000000075
+:100B80000000000000000000000000000000000065
+:100B90000000000000000000005C90015D90025E1B
+:100BA00090035F9000530000000000000000000070
+:100BB0000000000000000000000000000000000035
+:100BC0000000000000000000000000000000000025
+:100BD0000000000000000000009C94001D90019D9A
+:100BE00094029E94039F94040894050994060A9421
+:100BF000070B94004300000000000000000000000C
+:100C000000000000000000000000000000000000E4
+:100C10000000000000000000009C90019D90029EDA
+:100C200090071D90039F90047890057990067A9024
+:100C3000077B90005300000000000000000000004F
+:100C400000000000000000000000000000000000A4
+:100C5000000000000000000000DC94001D9001DD99
+:100C60009402DE9403DF940404940505940606942C
+:100C70000707940808940909940A0A940B0B940036
+:100C80004300000000000000000000000000000021
+:100C9000000000000000000000DC9001DD9002DE9A
+:100CA000900B1D9003DF9004B49005B59006B690AC
+:100CB00007B79008B89009B9900ABA900BBB90009A
+:100CC0005300000063FFFC0020006C6010FFFF0A6F
+:100CD0000000000020006C8400D23110FFFE0A00EA
+:100CE0000000000020006CCC00D33110FFFE0A0091
+:100CF0000000000020006D0C00D43110FFFE0A003F
+:100D00000000000020006D8000D53110FFFE0A00B9
+:100D10000000000063FFFC00E00000A012FFF7826B
+:100D200020028257C82163FFFC12FFF303E830045E
+:100D3000EE3005C03093209421952263FFFC000023
+:100D40001FFFD000000400201FFFC5A01FFFC6909A
+:100D5000200A0011FFFB13FFFB03E631010200161E
+:100D6000FFFA17FFFAD30F776B069060B4667763CC
+:100D7000F85415F3541AA50F140063FFF90000008E
+:100D80006C1004C020D10F006C1004C0C71AEF060D
+:100D9000D830BC2BD72085720D4211837105450BCD
+:100DA000957202330C2376017B3B04233D0893713B
+:100DB000A32D12EEFE19EEFEA2767D632C2E0A0004
+:100DC000088202280A01038E380E0E42C8EE29A6B8
+:100DD0007E6D4A0500208800308C8271D10FC0F0F2
+:100DE000028F387FC0EA63FFE400C0F1C050037E89
+:100DF0000CA2EE0E3D1208820203F538050542CB27
+:100E00005729A67E2FDC100F4F366DFA050020887B
+:100E100000308CBC75C03008E208280A0105833810
+:100E2000030342C93E29A67E0D480CD30F6D8A05E7
+:100E300000208800B08C8271D10FC05008F5387541
+:100E4000C0C163FFBBC06002863876C0DA63FFD4DE
+:100E50006C101216EED8C1F9C1E8C1C72B221E28AA
+:100E6000221DC0D07B81352920060BB702299CFAB0
+:100E7000655008282072288CFF2824726491642A07
+:100E8000B0000CA80C64816F0EA90C6492BB7FA10A
+:100E90003FC1CE7CA13669AC336000370029200603
+:100EA000D7D0299CFACC57282072288CFF2824728E
+:100EB0006491392AD0000CA80C6481680EA90C64D6
+:100EC000931F7FA10BC1CE7CA10268AC06C020D1CC
+:100ED0000F2D25028A32C0900A6E5065E5B529248F
+:100EE00067090F4765F5B12C200C1FEEB30CCE112E
+:100EF000AFEE29E286B44879830260058219EEAF2D
+:100F000009C90A2992A36890078F2009FF0C65F58B
+:100F10006E2FE28564F56865559628221D7B810554
+:100F2000D9B060000200C0908B9417EEA50B881416
+:100F300087740B0B47A87718EEA309BB100877023C
+:100F400097F018EEA117EEA208A8010B8802074738
+:100F5000021BEE9E97F10B880298F22790232B90AC
+:100F60002204781006BB1007471208BB0228902104
+:100F70000777100C88100788020B880217EE968BF3
+:100F80003307BB0187340B880298F3979997F48B4A
+:100F90009587399BF588968B3898F688979BF897B4
+:100FA000F998F717EE8D28E28507C7082D74CF084A
+:100FB000480B28E68565550F2B221E28221D7B89AC
+:100FC000022B0A0064BF052CB00728B000DA200607
+:100FD000880A28824CC0D10B8000DBA065AFE76394
+:100FE000FEEA0000292072659E946004E72A2072C0
+:100FF00065AEBF6004DE00002EB0032C2067D4E095
+:1010000065C1058A328C330AFF500C4554BC5564C7
+:10101000F4EB19EE72882A09A90109880C64821F71
+:10102000C0926000DD2ED0032A2067D4E065A0D8EE
+:101030008A328B330AFC500B4554BC5564C4BE192C
+:10104000EE67882A09A9017989D50BEA5064A4E3DF
+:101050000CEE11C0F02F16132E16168AE78CE82A14
+:1010600016128EE9DFC0AAEA7EAB01B1CF0BA85001
+:101070006583468837DBC0AE89991E789B022BCCEE
+:10108000012B161B29120E2B0A0029161A7FC307E3
+:101090007FC9027EAB01C0B165B49D8B352F0A00BC
+:1010A0002A0A007AC30564C3CB2F0A0165F4892B91
+:1010B00012162B1619005104C0C100CC1A2CCCFFFB
+:1010C0002C16170CFC132C16182B121A2A121BDCC8
+:1010D000505819B6C0D0C0902E5CF42C12172812AC
+:1010E000182F121B2A121A08FF010CAA01883407B4
+:1010F0004C0AAB8B2812192BC6162F86082A860994
+:101100002E74102924672E70038975B1EA2A74039E
+:10111000B09909490C659DB42B20672D250265B354
+:10112000FA2B221E2C221D7BC901C0B064BD9D2C50
+:10113000B00728B000DA2006880A28824CC0D10BFC
+:101140008000DBA065AFE763FD8289BAB199659045
+:101150009788341CEE2398BA8F331EEE1C0F4F5421
+:101160002FB42C8D2A8A320EDD020CAC017DC966AB
+:101170000A49516F92608A3375A65B2CB0130AED51
+:10118000510DCD010D0D410C0C417DC9492EB01200
+:10119000B0EE65E3C6C0D08E378CB88A368FB97C86
+:1011A000A3077AC9027EFB01C0D1CED988350AAD2A
+:1011B000020E8E0878EB022DAC0189B7DAC0AF9B26
+:1011C00079BB01B1CADCB0C0B07DA3077AD9027C7B
+:1011D000EB01C0B164B161C091292467C020D10F77
+:1011E00000008ADAB1AA64A0C02C20672D25026510
+:1011F000C3111DEDF68A321EEDFB0DAD010EDD0CA7
+:1012000065D28A0A4E516FE202600281C0902924A1
+:1012100067090F4765F2F828221D7B89022B0A0017
+:1012200064BCA92CB00728B000DA2006880A2882FE
+:101230004CC0D10B8000DBA065AFE763FC8E0000E3
+:101240000CE9506492ED0CEF11C080281611AFBF6D
+:101250002F16198EF88BF7DAE08FF92B1610ABFBEF
+:101260007FBB01B1EA0CA8506580D68837DCE0AFBF
+:1012700089991C789B022CEC012C161B29120C2C32
+:101280000A0029161A7AE3077AE9027FBB01C0C176
+:1012900065C2A58B352C0A002A0A007AE30564E1B1
+:1012A000CA2C0A0164CE0D60028E88341BEDCD98E5
+:1012B000DA8F331EEDC60F4F542FD42C8C2A8A326E
+:1012C0000ECC020BAB010CBB0C65BF0A0A49516E78
+:1012D000920263FF018A330AAB5064BEF92CD0132B
+:1012E0000AEE510ECE010E0E410C0C410ECC0C65D7
+:1012F000CEE42FD012B0FF65F26EC0B08E378CD81E
+:101300008A362FD2097CA3077AC9027EFB01C0B1BD
+:1013100065BEC38835DBA0AE8E78EB01B1AB89D753
+:10132000DAC0AF9D79DB01B1CAC0C07BA3077AB92F
+:10133000027DEB01C0C165CE9DC090292467C0200D
+:10134000D10F88378C3698140CE90C29161408F83C
+:101350000C981D78FB07281214B088281614891DD4
+:101360009F159B16C0F02B121429161A2B161B8BD7
+:10137000147AE30B7AE90688158E1678EB01C0F132
+:1013800065F1BA29121A2F12118A352E121B9A1AD8
+:10139000AFEE2F1210C0A0AF9F79FB01B1EE9F11ED
+:1013A000881AC0F098107AE30A7EA9052A12017AF9
+:1013B0008B01C0F164F08160018389368B37991706
+:1013C0000BE80C981F09C90C29161578EB07281291
+:1013D00015B088281615D9C09A199E188A1F2E1282
+:1013E000152A161A2E161BDAC0C0E08C177F930B35
+:1013F0007FA90688188F1978FB01C0E165E13E29B5
+:10140000121A2F12138A352E121B9A1BAFEE2F12AF
+:1014100012C0A0AF9F79FB01B1EE9F13881BC0F0F3
+:1014200098127AE30A7EA9052A12037A8B01C0F189
+:1014300065F10A2E12162E16192A121B005104C02D
+:10144000E100EE1AB0EE2E16170EFF132F16180F2E
+:10145000CC01ACAA2F121A0EBC01ACFC7FCB01B19F
+:10146000AA2A161B2C161A63FC5E00007FB30263C7
+:10147000FE3163FE2B7EB30263FC3063FC2A000066
+:101480006450C0DA20DBC058168AC020D10FC0914A
+:1014900063FD7A00C09163FA44DA20DB70C0D12E7C
+:1014A0000A80C09A2924682C7007581575D2A0D1DB
+:1014B0000F03470B18ED4DDB70A8287873022B7DC6
+:1014C000F8D9B063FA6100002A2C74DB40580EEEA4
+:1014D00063FAE4000029221D2D25027B9901C0B08A
+:1014E000C9B62CB00728B000DA2006880A28824C3A
+:1014F000C0D10B8000DBA065AFE7C020D10FC09149
+:1015000063FBFF00022A0258024C0AA202060000F6
+:10151000022A025802490AA202060000DB70DA2001
+:10152000C0D12E0A80C09E2924682C7007581554FB
+:10153000C020D10FC09463FBC9C09663FBC4C096A2
+:1015400063FBBF002A2C74DB30DC405BFE0FDBA0AA
+:10155000C2A02AB4002C200C63FF27008D358CB765
+:101560007DCB0263FDD263FC6D8F358ED77FEB029E
+:1015700063FDC563FC6000006C1004C020D10F0047
+:101580006C1004C020D10F006C10042B221E2822E6
+:101590001DC0A0C0942924062A25027B8901DBA056
+:1015A000C9B913ED04DA2028B0002CB00703880A6B
+:1015B00028824CC0D10B8000DBA065AFE7C020D1F2
+:1015C0000F0000006C10042C20062A210268C805B8
+:1015D00028CCF965812E0A094C6591048F30C1B879
+:1015E0000F8F147FB00528212365812716ECF3297E
+:1015F000629E6F98026000F819ECEF2992266890BD
+:10160000078A2009AA0C65A0E72A629D64A0E12B45
+:10161000200C0CB911A6992D92866FD9026000DBBF
+:101620001DECE70DBD0A2DD2A368D0078E200DEE6C
+:101630000C65E0C7279285C0E06470BF1DECEC68C4
+:10164000434E1CECEB8A2B0CAA029A708920089955
+:10165000110D99029971882A98748F329F752821EB
+:1016600004088811987718ECDC0CBF11A6FF2DF246
+:1016700085A8B82E84CF2DDC282DF685C85A2A2CB3
+:1016800074DB40580E81D2A0D10FC020D10F0000D2
+:101690000029CCF96490B12C20668931B1CC0C0CB6
+:1016A000472C24666EC60260008509F85065807F6D
+:1016B0001CECD18A2B0F08400B881008AA020CAA38
+:1016C000029A7089200899110D99029971883398AE
+:1016D000738C329C728A2A9A748934997563FF7D5F
+:1016E00000CC57DA20DB30DC4058155FC020D10F2A
+:1016F00000DA20C0B65815EE63FFE500DA20581571
+:10170000EC63FFDC00DA20DB30DC40DD5058167A79
+:10171000D2A0D10FC858DA20DB305814C72A2102D2
+:1017200065AFBDC09409A90229250263FFB200007C
+:101730002B21045814731DECADC0E02E24668F30AD
+:101740002B200C0F8F1463FF66292138C088798302
+:101750001F8C310CFC5064CF562B2104C0C0581490
+:10176000681DECA2C0E08F302B200C0F8F1463FF9C
+:101770003E2C20662B2104B1CC0C0C472C2466583F
+:1017800014601DEC9AC0E02E24668F302B200C0FC5
+:101790008F1463FF1A0000006C1004C0B7C0A116BC
+:1017A000EC9615EC88D720D840B822C04005350209
+:1017B0009671957002A438040442C94B1AEC7B1947
+:1017C000EC7C29A67EC140D30F6D4A0500808800BD
+:1017D000208C220A88A272D10FC05008A53875B09B
+:1017E000E363FFD76C10069313941129200665520A
+:1017F00088C0716898052A9CF965A29816EC6F2933
+:1018000021028A1309094C6590CD8AA00A6A512ADF
+:10181000ACFD65A0C2CC5FDB30DA208C115815120C
+:10182000C0519A13C7BF9BA98E132EE20968E060CE
+:101830002F629E1DEC606FF8026000842DD2266836
+:10184000D0052F22007DF9782C629DC79064C0706E
+:101850009C108A132B200C2AA0200CBD11A6DD0A97
+:101860004F14BFA809880129D286AF88288C09792E
+:101870008B591FEC520FBF0A2FF2A368F0052822E4
+:10188000007F894729D285D4906590756000430018
+:10189000002B200C1FEC4A0CBD11A6DD29D2860FAF
+:1018A000BF0A6E96102FF2A368F00488207F890586
+:1018B00029D285659165DA2058157DC95C6001FFE4
+:1018C00000DA20C0B658157A60000C00C09063FFA3
+:1018D000B50000DA205815766551E48D138C11DBC4
+:1018E000D08DD0022A020D6D515813E39A1364A1D2
+:1018F000CEC75F8FA195A9C0510F0F479F1163FEFF
+:10190000FD00C091C0F12820062C2066288CF9A784
+:10191000CC0C0C472C24666FC6098D138DD170DE5C
+:1019200002290A00099D02648159C9D38A102B211A
+:10193000045813F38A13C0B02B24662EA2092AA0E0
+:10194000200E28141CEC298D1315EC1DC1700A778C
+:101950003685562DDC28AC2C9C12DED0A8557CD3C5
+:10196000022EDDF8D3E0DA40055B02DC305BFF8A53
+:10197000D4A028200CB455C0D02B0A882F0A800C84
+:101980008C11A6CC29C285AF3FAB9929C6851CEC2A
+:1019900012DEF0AC882D84CF28120229120378F3CE
+:1019A000022EFDF8289020D3E007880CC1700808AB
+:1019B00047289420087736657FAB891313EC10898C
+:1019C00090C0F47797491BEC0EC1CA2821048513F7
+:1019D000099E4006EE11875304881185520E880235
+:1019E0000C88029BA09FA18F2B9DA598A497A795DB
+:1019F000A603FF029FA22C200C1EEBF7AECE0CCC50
+:101A00001106CC082BC2852DE4CF2BBC202BC6851C
+:101A10002A2C748B11580D9CD2A0D10F28203DC0C8
+:101A2000E07C877F2E24670E0A4765A07B1AEBF5C2
+:101A300088201EEBE38F138EE48FF40888110A8848
+:101A4000020F8F14AFEE1FEBF098910FEE029E90F5
+:101A50001EEBEFC0801AEBE02CD285AABAB8CC28D6
+:101A6000A4CF2CD6852C21022F20720ECC02B1FFE0
+:101A70002F24722C2502C020D10F871387700707EF
+:101A80004763FD6E282138C099798B0263FE9ADD89
+:101A9000F063FE9500DA20DB308C11DD505815968E
+:101AA000D2A0D10FC0E163FF7A8B138C11DD50C03F
+:101AB000AA2E0A802A2468DA205813F1D2A0D10F66
+:101AC000C020D10F6C1006292102C0D07597102AB2
+:101AD00032047FA70A8B357FBF052D25020DD90261
+:101AE000090C4C65C18216EBB41EEBB228629EC095
+:101AF000FA78F30260018829E2266890078A2009B3
+:101B0000AA0C65A17A2A629DDFA064A1772B200C24
+:101B10000CBC11A6CC29C286C08C79830260015707
+:101B200019EBA709B90A2992A368900788200988A8
+:101B30000C65814327C2851CEBA964713A89310980
+:101B40008B140CBB016FB11D2C20669F10B1CC0C07
+:101B50000C472C24666EC60260014009FF5065F1F7
+:101B60003A8A102AAC188934C0C47F973C18EBA974
+:101B70001BEBA88F359C719B708B209D7408BB025A
+:101B80009B72C08298751BEBA40F08409B730F8853
+:101B90001198777FF70B2F2102284A0008FF022FA8
+:101BA0002502C0B4600004000000C0B07E97048F1E
+:101BB000362F25227D970488372825217C9736C02B
+:101BC000F1C0900AF9382F3C200909426490861927
+:101BD000EB7618EB7728967E00F08800A08C00F05A
+:101BE0008800A08C00F08800A08C2A629D2DE4A2C1
+:101BF0002AAC182A669D89307797388F338A321835
+:101C0000EB8007BE0B2C2104B4BB04CC1198E0C0C0
+:101C10008498E1882B9DE59AE69FE71AEB78099F67
+:101C20004006FF110FCC020A880298E2C1FC0FCCDB
+:101C3000022CE604C9B82C200C1EEB670CCA11AEAE
+:101C4000CC06AA0829A2852DC4CF09B90B29A685DF
+:101C5000CF5CC020D10FC081C0900F8938C0877978
+:101C6000880263FF7263FF6600CC57DA20DB30DC4A
+:101C7000405813FDC020D10FDA2058148D63FFE8BF
+:101C8000C0A063FE82DA20C0B658148963FFD90071
+:101C9000DB402A2C74580CFCD2A0D10F8A102B21C7
+:101CA000045813171EEB44C0D02D246663FEB10008
+:101CB0006C1006D62019EB3F1EEB4128610217EB92
+:101CC0003E08084C65805F8A300A6A5169A3572B29
+:101CD000729E6EB83F2A922668A0048C607AC9343E
+:101CE0002A729D2C4CFECAAB2B600CB64F0CBD115A
+:101CF000A7DD28D2860EBE0A78FB269C112EE2A311
+:101D00002C160068E0052F62007EF91522D285CFDF
+:101D10002560000D00DA60C0B6581465C85A60012D
+:101D20000F00DA60581462655106DC40DB308D30FC
+:101D3000DA600D6D515812D0D3A064A0F384A1C015
+:101D40005104044763FF6D00C0B02C60668931B157
+:101D5000CC0C0C472C64666FC60270960A2B61048B
+:101D60005812E7C0B02B64666550B42A3C10C0E737
+:101D7000DC20C0D1C0F002DF380F0F4264F09019B0
+:101D8000EB0A18EB0B28967E8D106DDA0500A08803
+:101D900000C08CC0A089301DEB1A77975388328C15
+:101DA000108F3302CE0BC02492E12261049DE00427
+:101DB00022118D6B9BE59FE798E61FEB1009984079
+:101DC0000688110822020FDD02C18D9DE208220261
+:101DD00092E4B4C22E600C1FEB000CE811A7882C13
+:101DE0008285AFEE0C220B2BE4CF228685D2A0D1C8
+:101DF0000F28600CD2A08C1119EAF80C8D11A9885B
+:101E0000A7DD2ED2852B84CF0ECC0B2CD685D10FFF
+:101E1000C0F00ADF387FE80263FF6C63FF600000F8
+:101E20002A6C74C0B2DC20DD405812C5C0B063FF1C
+:101E300063C020D10F0000006C10042920062A2264
+:101E40001EC0392C221D232468C0307AC107DDA0B2
+:101E5000600004000000C0D06E9738C08F2E0A804A
+:101E60002B2014C0962924060EBB022E21022B24FF
+:101E7000147E8004232502DE307AC10EC8ABDBD08D
+:101E8000DA202C0A00580B062E21020E0F4CC8FE39
+:101E90006000690068956528210208084C65805C2F
+:101EA0001AEAC61EEAC42BA29EC09A7B9B5E2BE256
+:101EB0002668B0048C207BC95329A29D1FEAC16407
+:101EC000904A9390C0C31DEAD52B21049D9608BB70
+:101ED000110CBB029B979B911CEAD2C08523E4A204
+:101EE0002BA29D2824068DFA282102B0DD2BBC30C0
+:101EF0002BA69D9DFA0C8802282502C8D2C020D1AD
+:101F00000F8EF912EAC82E2689C020D10FDA20C020
+:101F1000B65813E7C020D10F6C10062A2006941083
+:101F200068A80528ACF965825029210209094C6589
+:101F3000920ACC5FDB30DA208C1058134BC051D39F
+:101F4000A0C7AF9A3AC0D01CEA9D14EAA31EEA9C2F
+:101F50008F3A16EA99B1FB64B13128629E6F88020C
+:101F60006001ED294C332992266890078A2009AA3E
+:101F70000C65A1DC2A629DC08E64A1D42B200C0CC0
+:101F8000B7110677082972867983026001CD0CB9F2
+:101F90000A2992A36890082C220009CC0C65C1BBC9
+:101FA0002772856471B5282006288CF96481E52C98
+:101FB00020668931B1CC0C0C472C24666EC60260B9
+:101FC00001A109F85065819B2A21048CE488361E02
+:101FD000EA7D088914A9CC08084709881019EA92F3
+:101FE0000ECC029C7099718C2A1EEA9008CC020ECD
+:101FF000CC029C722E302C293013283012049910F8
+:102000000688100CEE109F740EAE0209880208EECE
+:10201000029E738C3704AA119C758938C0F4997696
+:102020008839C0C1987718EA828E359C7B9E780EDD
+:102030008E1408EE029E7A8E301CEA7177E73088A3
+:102040003289339C7C9F7D0E9C4006CC118F2B29BE
+:1020500076132D76112876120CAA0218EA68C1C9E7
+:102060000CAA022A761008FF029F7EC0AA60000117
+:10207000C0A6A4BC0CB911A6992892852DC4CF087E
+:10208000A80B289685655100C020D10F2B200C0C81
+:10209000B7110677082A72860CB90A6FA902600187
+:1020A000182992A36890082A220009AA0C65A109A0
+:1020B0002A728564A1032C203D0C2C4064C08C8CBA
+:1020C000350C8C1464C0848FE57CF37F8C360C8CCB
+:1020D0001464C0777CF374283013C0FC78F86CC0AB
+:1020E00090292467090C4765C0D719EA4718EA45C3
+:1020F0008F208C3508FF110C8C1408FF0288E49F98
+:10210000A1AC8C09CC029CA08C369FA30C8C14AC87
+:102110008809880298A218EA3DA4BC2F72852DC4B4
+:10212000CF2FFC102F76852F210229207208FF0265
+:10213000B2992924722F2502C020D10F00CC57DA82
+:1021400020DB308C105812C8C020D10FC09163FF23
+:102150008FDA20C0B658135663FFE100DA20581317
+:102160005463FFD82B21045811E61EEA152B200CCE
+:10217000C0D02D24668F3A63FE4DDA20DB30DC4080
+:10218000DD505813DDD2A0D10F2A2C748B10580BC0
+:10219000BED2A0D10F292138C08879832E8C310C72
+:1021A000FC5064CE222B2104C0C05811D5C0D01ED3
+:1021B000EA048F3A2B200C63FE0DDA2058133C639F
+:1021C000FF7ADA205BFF1CD2A0D10F002C20662BF7
+:1021D0002104B1CC0C0C472C24665811C91EE9F817
+:1021E0002B200CC0D02D24668F3A63FDDA0000004E
+:1021F0006C10089514C061C1B0D9402A203DC04080
+:102200000BAA010A64382A200629160568A8052C9D
+:10221000ACF965C33F1DE9EA6440052F120464F27E
+:10222000A02621021EE9E606064C6562E615E9E2F3
+:102230006440D98A352930039A130A990C6490CCEA
+:102240002C200C8B139C100CCC11A5CC9C112CC2F7
+:1022500086B4BB7CB3026002D78F100EFE0A2EE25A
+:10226000A368E0098620D30F0E660C6562C2881150
+:102270002882856482BA891364905EDA80D9308CB2
+:10228000201EE9E01FE9E11DE9CE8B138DD4D4B007
+:102290007FB718B88A293C10853608C6110E660229
+:1022A0009681058514A5D50F550295800418146DE7
+:1022B0008927889608CB110888140EBB02A8D82954
+:1022C0009C200F88029BA198A088929BA308881449
+:1022D000A8D80F880298A22AAC1019E9CCC0C08FE8
+:1022E000131EE9BD86118D10286285AEDD08FF0B37
+:1022F0002CD4CF2821022F66858B352A207209889D
+:1023000002ABAA2825022A2472C020D10F29529E8E
+:1023100018E9A96F980260020B28822668800829B4
+:10232000220008990C6591FC2A529DC1CE9A126434
+:10233000A1F22B200C2620060CB8110588082D824E
+:10234000860EBE0A7DC3026002052EE2A368E00885
+:102350002F22000EFF0C65F1F6288285D780DE80E3
+:102360006482009816266CF96462012C206688311C
+:102370002CCC010C0C472C24666EC6026001BC08F4
+:10238000FD5065D1B61DE9AB1CE98F19E9962A21EC
+:10239000048B2D2830102F211D0C88100BFB090AEF
+:1023A00088020988020CBB026441529B709D71989F
+:1023B00072C04D8D35D9E064D06ED730DBD0D830C7
+:1023C0007FD714273C10BCE92632168C3996E69C40
+:1023D000E78A37B4382AE6080B131464304A2A8295
+:1023E0001686799A9696978C778A7D9C982B821779
+:1023F0002C7C209A9A2A9C189B99867BB03B298C2E
+:10240000086DB9218BC996A52692162AAC18B899E1
+:102410009BA196A08BC786CD9BA22B921596A49BC1
+:10242000A386CB2CCC2026A605C0346BD4200D3B34
+:102430000C0DD8090E880A7FB705C0909988BC8812
+:10244000C0900B1A126DAA069988998B288C18C017
+:10245000D01BE97A1CE97916E96EB1FF2A211C2309
+:10246000E6130F0F4F26E6122F251D7FA906C0F099
+:10247000C08028251D05F6111AE9678F202BE61567
+:102480002CE6162DE61726E6180AFA022AE6142983
+:102490002006299CF96490F829200C8D14C0801A1C
+:1024A000E94E0C9C11AA99A5CCDA202BC285289460
+:1024B000CF0B4B0B2BC685C0B08C155811BBD2A0CF
+:1024C000D10F8A356FA546D8308BD56DA90C8A8679
+:1024D0000A8A14CBA77AB335288C10C080282467C9
+:1024E000080B4765B10BDA20DB302C12055811DEE2
+:1024F000D3A0C0C1C0D02DA4039C1463FD22863696
+:102500006461059B709D719872C04D63FEA4C0818B
+:1025100063FFC9008814CC87DA20DB308C15581192
+:10252000D2C020D10FDA20C0B658126163FFE40098
+:1025300000DA208B1058125E63FFD8009E178A12B3
+:102540002B21045810EF8E17C09029246663FE34A7
+:10255000C08063FE06DA20DB308C15DD505812E6B1
+:10256000D2A0D10FDA2058125263FFA7002B2138D6
+:10257000C0A87BAB026001048C310CFC5064CE041B
+:102580008A122B2104C0C098175810DD8E1763FDE6
+:10259000F32D21382DDCFF0D0D4F2D253865DEF78D
+:1025A00028206A7F87050826416460A3C09016E949
+:1025B000141CE9232A200723E61BB1AA0CFD0226DE
+:1025C000E61A2B200A29E61D2DE61E0CBB022BE67F
+:1025D0001C8B260A0A472BE6208B282AE53E2BE691
+:1025E000212924072820062A2064688346B44463EE
+:1025F000FEA5DB30DA208C158D142E0A80C08E28C3
+:10260000246858111FD2A0D10F2E7C4819E8ED2A5A
+:1026100032162B76129D712D761328761489960A20
+:102620002A14AA990C9902997069ED71C14663FD4B
+:102630008100000064AFB51DE8E22C20168DD20A9F
+:10264000CC0C00D10400CC1AACBC9C2963FF9D00CB
+:102650002B21046EB81E2C2066B8CC0C0C472C2401
+:1026600066C9C09E178A125810A68E17C0348F20D4
+:10267000C0D02D2466C06826240663FF2E8A122B44
+:1026800021042C20669817B1CC0C0C472C246658DA
+:10269000109C8E178716C0D02D246663FCE68D35FE
+:1026A000C08064D04AD9E0DC30DBE0DF301AE8E5F6
+:1026B000B188B4FF16E8E584C92D9DFF87C82CCCEE
+:1026C0001027D63006460127D6320A440117E8DF24
+:1026D00024D631A74727D63324F21596B794B68D62
+:1026E000C3BCBB9DB58D35299C107D83C22F211D98
+:1026F000C14663FD330000006C1006292006289CAB
+:10270000F86582BF2921022B200C09094C6590E154
+:1027100016E8AA0CBA11A6AA2DA2862C0A127DC30D
+:102720000260028C19E8A609B90A2992A3689007E9
+:102730008C2009CC0C65C27829A2856492722D6226
+:102740009E1AE89C6FD80260026E2AA22629160102
+:1027500068A0082B22000ABB0C65B25C29629DC1EF
+:102760008C6492542A21200A806099102C203CC746
+:10277000EF000F3E010B3EB1BD0FDB390BBB098FE4
+:10278000260DBD112DDC1C0D0D410EDD038E27B174
+:10279000DD0D0D410FEE0C0DBB0B2BBC1C0BB7025E
+:1027A0007EC71C2C21257BCB162D1AFC0CBA0C0DD8
+:1027B000A16000093E01073EB1780987390B770A0D
+:1027C00077EB0260020A2C2123282121B1CC0C0CCA
+:1027D0004F2C25237C8B29B0CD2D2523C855DA20FD
+:1027E000DB30581095292102CC96C0E80E9E022EAF
+:1027F0002502CC57DA20DB30DC4058111BC020D139
+:102800000F2C20668931B1CC0C0C472C24666EC687
+:10281000026001D309FD5065D1CD2F0A012E301180
+:1028200029221464E01128221B090C4400C1040071
+:10283000FA1A0A880228261B2E3010C0A0C0B094B5
+:102840001295131CE85F88302CC022088D147787FE
+:1028500004C0F10CFA38C041C0F225203CC0840805
+:1028600058010F5F010F4B3805354007BB10C0F012
+:10287000084F3808FF100FBB0228ECFEC0F0084FCD
+:1028800038842B0BA8100AFF102A21200F88020B76
+:10289000880208440218E86E8F1108440228212596
+:1028A0000A2A140828140488110A88022A21049488
+:1028B000F08B2004E41008BB1104BB02C04A04BB27
+:1028C000029BF1842A08AB110BEB0294F40A541119
+:1028D0000B44020555100D1B4094F707BB100B5518
+:1028E00002085502C08195F68433C05094F3B19428
+:1028F0008B3295F898F99BF2C080C1BC24261499BC
+:10290000FA9BF598FB853895FC843A94FD8B3B9BAC
+:10291000FE883998FF853525F6108436851324F610
+:10292000118B3784122BF612C0B064C07E893077C9
+:1029300097438D3288332E30108F111CE83109995E
+:10294000400699112CF614C0C42CF6158C2B2DF6CC
+:102950001A28F61B2BF61904A81109880208EE02A2
+:1029600019E827C18008EE0209C90229F6162EF6D9
+:1029700018C09E600001C09A2F200C18E8170CFEAA
+:1029800011A8FFA6EE2DE2852BF4CF0D9D0B2DE6B1
+:1029900085C87F8A268929A7AA9A260A990C090937
+:1029A00048292525655050C020D10F00C09A63FFEB
+:1029B000C6DA2058113F63FE38DA20C0B658113C01
+:1029C00063FE2E0068973C2B9CFD64BE24C020D182
+:1029D0000FDA20DB705810F8C0C0C0D10ADA390A0B
+:1029E000DC3865CDE063FE098A102B2104580FC442
+:1029F000C0B02B246663FE21DB402A2C745809A248
+:102A0000D2A0D10FDA20580FC963FCF76C1004C0B4
+:102A100020D10F006C1004290A801EE80E1FE80E5A
+:102A20001CE7E60C2B11ACBB2C2CFC2DB2850FCC7B
+:102A3000029ED19CD0C051C07013E80A14E8091856
+:102A4000E8072AB285A82804240A234691A986B853
+:102A5000AA2AB685A98827849F25649FD10F0000E4
+:102A60006C100AD630283010292006288CF9648290
+:102A70009B68980B2A9CF965A1B2022A02580FABF9
+:102A800089371BE7CFC89164520E2A21020A0C4CE9
+:102A900065C2588D3019E7C874D7052E212365E229
+:102AA0009E2F929E1AE7C46FF8026002532AA22654
+:102AB00068A0082C22000ACC0C65C2442A929D64AE
+:102AC000A23E9A151FE7BE8D67C1E6C8DD2B6206E0
+:102AD00018E7BC64B0052880217B8B432B200C18A1
+:102AE000E7B60CBC11A8CC29C28679EB460FBE0A0A
+:102AF0002EE2A368E0052F22007EF9372CC2859CC8
+:102B00001864C2332B212F87660B7B360B790C6F31
+:102B10009D266ED2462C203D7BC740CE5560001EC0
+:102B20002A200CC1B28C205811229A1864A2458D1B
+:102B30006763FFCFC0C063FFC5D7B063FFD300C0DA
+:102B4000E06000022E60030EDB0C6EB20EDC700C37
+:102B5000EA11AA6A2AAC20580199D7A0DA20DB70C2
+:102B6000C1C82D21205810BC8C268B279A160CBB6F
+:102B70000C7AB3348F18896399F3886298F28E6562
+:102B80009EF82D60108A189D1768D729C0D09DA97E
+:102B90002C22182B22139CAB9BAA97A58E667E73C2
+:102BA00002600097CF5860001FDA208B1658108201
+:102BB00065A13863FFBDC081C0908F18C0A29AF98B
+:102BC00099FB98FA97F563FFD2DB30DA20DC4058A6
+:102BD0001026C051D6A0C0C02BA0102CA4039B1758
+:102BE0002C1208022A02066B02DF702D60038E177A
+:102BF0009D149E100CDD11C0E0AD6D2DDC20580140
+:102C0000188C148B16ACAC2C64038A268929ABAAC9
+:102C10000A990C9A26886609094829252507880CEF
+:102C200098662F2218A7FF2F261863FE96DA20DB5E
+:102C300030DC40DD50581130D2A0D10FC0302C20F4
+:102C4000668961B1CC0C0C472C24666EC60260000C
+:102C5000D2C03009FD5065D0CA8E6764E0696470E7
+:102C600066DB608C18DF70DA202D60038E170CDDB8
+:102C7000119E10AD6D2DDC201EE7755800F923263E
+:102C800018DA208B16DC402F2213DD50B1FF2F26DF
+:102C900013580FC5D2A0D10F0028203D0848406529
+:102CA0008DE76F953EDA308DB56D990C8CA80C8C44
+:102CB00014CACF7CD32D2AAC10C090292467090DEB
+:102CC0004764DDC5600092002C1208066B022D6C73
+:102CD00020077F028E17DA209E101EE75C58007DC9
+:102CE00063FF9A00C09163FFD1000000655081DA54
+:102CF00020DB60DC40580FDCC020C0F02FA403D1E3
+:102D00000FDA20C0B658106A63FFE000006F95022A
+:102D100063FD6CDA20DB30DC40DD50C4E0580F5836
+:102D2000D2A0D10F8A152B2104580EF52324662832
+:102D30006010981763FF2100DA2058105D63FFAB25
+:102D4000C858DB30DA20580F3C2A210265AF9CC0FE
+:102D50009409A90229250263FF91DB30DC40DD5094
+:102D6000C0A32E0A802A2468DA20580F45D2A0D1A9
+:102D70000FC020D10FDA202B200C58107263FF6B8C
+:102D80006C1004282006C062288CF8658125C0508C
+:102D9000C7DF2B221BC0E12A206B29212300A104BD
+:102DA000B099292523B1AA00EC1A0BC4010A0A44E0
+:102DB0002A246B04E4390DCC030CBB012B261B64C5
+:102DC000406929200C1BE6FC0C9A110BAA082FA2C3
+:102DD000861BE6FA6FF9026000B60B9B0A2BB2A3C2
+:102DE00068B0082C22000BCC0C65C0A42BA2851D5A
+:102DF000E71E64B09B8C2B2421040DCC029CB08870
+:102E000020C0C50888110C880298B1882A0844118E
+:102E100098B48F3494B79FB5C0401EE6EF2DA285BD
+:102E20000E9E0825E4CF2DDC282DA6852921020938
+:102E3000094C68941A689820C9402A210265A00BA1
+:102E40002A221E2B221D7AB10265A079C020D10F43
+:102E50002C212365CFDE6000082E21212D21237E29
+:102E6000DBD52B221E2F221D2525027BF901C0B0A8
+:102E700064BFC413E6D02CB00728B000DA20038862
+:102E80000A28824CC0D10B8000DBA065AFE763FF4E
+:102E9000A62A2C74C0B02C0A02580E2F1CE6F49CF3
+:102EA000A08B2008BB1106BB029BA1893499A263A9
+:102EB000FF790000262468DA20DB30DC40DD505842
+:102EC000108ED2A0D10FDA202B200C580FF9C02081
+:102ED000D10F00006C1006073D14C080DC30DB40D1
+:102EE000DA20C047C02123BC3003283808084277C5
+:102EF0004001B1DD64815A1EE6AC19E6AD29E67EDB
+:102F0000D30F6DDA0500508800308CC0E0C020255A
+:102F1000A03C14E6ABB6D38FC0C0D00F87142440BA
+:102F2000220F8940941077F704C081048238C0F1E1
+:102F30000B2810C044C02204540104FD3802520181
+:102F400002FE3808DD10821C07EE100E6E020EDD48
+:102F500002242CFEC0E004FE380AEE100E88020D9A
+:102F600088028DAB1EE69B08D8020E880298B0C07E
+:102F7000E80428100E5E0184A025A125084411084C
+:102F80004402052514045511043402C0810E8E3903
+:102F900094B18FAA84109FB475660C26A11FC0F24D
+:102FA000062614600009000026A120C0F20626149F
+:102FB0000565020F770107873905E61007781008C5
+:102FC000660206550295B625A1040AE611085811B5
+:102FD00008280208660296B7C060644056649053A1
+:102FE000067E11C0F489C288C30B340B96459847FE
+:102FF000994618E6829F410459110E99021FE680F6
+:10300000020E4708D80298420E99029F40C1E00E76
+:10301000990299442FA00CB4380CF91114E66F1ED4
+:10302000E666A4FFAE992E928526F4CF0E880B2873
+:103030009685D10F2BA00C1FE6601CE6670CBE1115
+:10304000ACBBAFEE2DE28526B4CF0D3D0B2DE68552
+:10305000D10FC08005283878480263FEA263FE962F
+:103060006C1006C0C06570F18830C03008871477D6
+:103070008712C0B0C0A619E652299022C030CC9762
+:10308000C031600003C0B0C0A6C0E0C091C0D4C0D1
+:103090008225203C0B3F109712831CC070085801FA
+:1030A0000D5D01089738C0800B98380777100488A9
+:1030B00010086802087702C0800D98382D3CFE0881
+:1030C00088100D9E388D2B0AEE1008EE0207EE02D6
+:1030D0000CB8100FDD02053B400EDD029D4089203B
+:1030E000043D100899110D99022D210409A9020827
+:1030F000DD119941872A05B9100D3D020ABB110D5A
+:10310000BB02087702974428212587120828140457
+:103110008811071E4007EE100E99027566092621D8
+:103120001F062614600006002621200626140868C3
+:10313000029B47098802984629200CD2C0C0800C07
+:103140009E111BE6251FE61CAB99AFEE2DE28528EC
+:1031500094CF0DAD0B2DE685D10FDD40C0A6C0B0DC
+:103160008E51CAE0B2AAB1BB2DDC108F500E78365A
+:10317000981008770C9FD898D989538F5299119934
+:10318000DB9FDA7E8309B1CC255C10C97763FFCF62
+:1031900088108D1108E70C9751AD8DD7F078DB01C1
+:1031A000B1F79D5397528830C03008871408884083
+:1031B000648ED565BEC963FEBC0000006C1004D7E8
+:1031C00020B03A8820C0308221CAA0742B1E2972F8
+:1031D000046D080FC980C9918575B133A2527A3B3D
+:1031E0000B742B0863FFE900649FECD10FD240D130
+:1031F0000F0000006C100AD6302E3027D950DA406C
+:1032000015E5F02430269A1529160464E00264932B
+:10321000732920062A9CF865A3CE2A2102270A04D6
+:103220000A0B4C65B3978C3074C7052D212365D4E8
+:10323000A0C0A62B0A032C2200580F3664A3B9178E
+:10324000E5DE8E389A1664E3BA2F6027285021C92C
+:10325000F37E8311C2B08C202A200C580F55D7A0C2
+:10326000CDA16004A200C2B08C202A200C580F29E6
+:10327000D7A064A4862F212E8B680FBF360FB90C00
+:103280006F9D54296027D5B06E920528203D7B8F15
+:103290004CDA20DB50C1C42D211F580EEF8B269A2B
+:1032A000189A1989272AAC380B990C7A9353896399
+:1032B000C08099738F6298789F728E659E798D67B2
+:1032C0009D7B8C6695759C7A8E687E53026000B1FA
+:1032D0008B1465B050600038DBF063FFA5008A14E2
+:1032E000C9A92E60030E9B0C6EB2A5DC500CEA112E
+:1032F000AA6A2AAC285BFFB1D5A063FF93C0E06344
+:10330000FFE2DA208B18580EAC65A2B163FF9E0075
+:1033100000DA20DB308C15580E54D6A0C0C0C0D1C6
+:103320002D16042CA403DC70DA20DB60DF502D6046
+:1033300003C0E09E109D171EE5B90CDD110D6D0850
+:103340002DDC285BFF478E668F678817AF5FA8A8C4
+:1033500028640375FB01B1EE8A189E669F67892673
+:103360008829AA9909880C99268E6808084805EECC
+:103370000C28252515E5939E6865EECC63FEE600D6
+:103380000000C9432F21232B21212FFC010F0F4FB8
+:103390002F25237FBB026003142C20668961B1CCEA
+:1033A0000C0C472C24666EC60260022809FD50658D
+:1033B000D22264E1B62E602764E1B0DC70DF50DA1F
+:1033C00020DB601EE5AB2D6003C08098100CDD1182
+:1033D000AD6D2DDC285BFF22644181C0442B0A00C7
+:1033E0008C202A200C580ECB0AA70265A00FC0B073
+:1033F0002C22002A200C580EC7D7A064AFEFDA2089
+:10340000C1BCC1C82D21208F188E268929AFEE9E00
+:10341000260E990C090948292525580E8FC090C001
+:1034200050C0C288609A191EE566C0A12EE022082D
+:103430008F14778704C0810E8938C0800B93102DBC
+:10344000203C2921200CDC0104DB010929140BA8F4
+:10345000380CA5380D3D401CE57E8B2B08881007E5
+:1034600055100855020533022821250F154003BBCE
+:10347000020CBB0207551005D3100828140ADD11F1
+:103480000488110988020533022921040833029BAC
+:1034900070C0808A201BE57708AA110BAA029A71D6
+:1034A000C0A1852A9376957408931103DD020ADD85
+:1034B000029D778C63C1DC9C738B6298789A799BB0
+:1034C00072232214C0C0B1352526149C7B9D7593B0
+:1034D0007A2B621A9B7C2A621C9A7D28621D987E38
+:1034E00025621B957F2362172376102D62182D7697
+:1034F000112C62192C761264E0B98E6077E73DC01A
+:10350000FE13E53E1DE53FC1818A628B6304951180
+:103510000E9C4006CC110C5502247615085502C0AD
+:10352000802D76148D2B2B761B2A761A287619255A
+:10353000761803DD022D76166000030000C0FA2E17
+:10354000200C19E52518E51CA9E90CEE11A8EEC020
+:10355000802DE2852894CF0DFD0B2DE685DA208B9A
+:10356000198C158D14580D90D2A0D10FDC70DF503E
+:10357000DB602D6C28C0A01EE53E9A10DA205BFEB1
+:103580005563FE53002B203D0B4B4065BC826FE51D
+:1035900027DA308F556DE90C8EAA0E8E14C9E87E9D
+:1035A000F3162AAC10C090292467090F4764FC6009
+:1035B00060015F00C0FA63FF85C09163FFE8881473
+:1035C000658168DA20DB608C15580DA7C020C0909B
+:1035D00029A403D10F8A162B2104580CC9C0A02A94
+:1035E00024668E6863FDCA00002B9CF965B0FDDA85
+:1035F00020580CCE63FC220000DA20C0B6580E2CF6
+:1036000063FFBA002B200C0CBE11A7EE2DE286C181
+:10361000C27DC30260011819E4E909B90A2992A31D
+:103620006890082A220009AA0C65A10326E2856495
+:1036300060FD2C20668931B1CC0C0C472C24666FC0
+:10364000C60270960C8A162B2104580CADC0D02DE2
+:1036500024668E3077E74D1CE4E91BE4E98F32885D
+:1036600033C0A42D21040E994006991104DD1109DF
+:10367000DD029A61C19009DD029B60C0908B2B9D99
+:10368000649F66986799650CBB029B6228200C1AA0
+:10369000E4D2AA8A0C8811A7882F828529A4CF2F6B
+:1036A000FC202F86858A1465A0A6C020D10FB0FC0F
+:1036B0008B142C2523C8B7022A02066B02580CDE95
+:1036C0002A210265AEF7C0D80DAD022D250263FE9A
+:1036D000EC008E14C8E8DA20DB30580CD72A21021F
+:1036E00065AEDA07AF022F250263FED100DA20DBD8
+:1036F000308C158D14580E80D2A0D10FDA202B20DB
+:103700000C580DEB63FEB600DA202B200C580E0D82
+:1037100063FEAADA20DB308C152D12042E0A8028D5
+:103720000A00282468580CD663FAE500C020D10F9F
+:10373000DA20580DDF8914CD92DA20DB308C155851
+:103740000D4ADBA0C020C0A02AB403D10FC020D1F5
+:103750000F2A2C748B1558064CD2A0D10F000000F4
+:103760006C100E28210224160108084C6583A91F3D
+:10377000E49229F29E6F98026003AD1EE48E29E266
+:10378000266890082A220009AA0C65A39B24F29DB2
+:103790006443952A31160A4B412B240BB4BB0B0B07
+:1037A000472B240C0CB611AF66286286C1CC78C3B7
+:1037B0000260037F19E48209B90A2992A36890077D
+:1037C0008C2009CC0C65C36B276285647365293135
+:1037D00009C0D02D24668C3599139C2A88369C14F8
+:1037E000982B8E3798159E169E2C8C38C0E10C5C59
+:1037F000149C179C2D88392925042E251D28251C4D
+:103800002C3028C0822C243C2930290C0C4708C8B5
+:103810000129243D29311598189912090841089960
+:103820000C299CEC29251F7EC725921C8212282A70
+:1038300000082060991B01023E00093EB128098260
+:1038400039891B0E221102990C821C29251F821C0A
+:10385000941D951E24211F15E4880451609A10C1FF
+:10386000802B1610252014961F05054301063E00E7
+:103870000D3EB16B0DB6398B3C2D9CFC08663606AF
+:10388000441C893D2E26132E26142E26152E246B1D
+:1038900025241406D61CC05025261825261B2524B1
+:1038A000672524682832112525232525242525254B
+:1038B00025252C2925222D25202B252124252E26A2
+:1038C000252F14E46F16E46D1BE45298192D211C6A
+:1038D000C08498719B70892095759577957F967CAB
+:1038E000967E98799B7894731BE46714E4680C388F
+:1038F000400288100C064015E464016610947D9B1C
+:1039000074841D1BE444086602957B18E431851E0F
+:103910000B99029972997A0866022B121096768694
+:103920001F6FD2026001C8C0A0991A6D080AB1AA1F
+:1039300000A10400E81A7D8B0263FFEE891AC0E043
+:10394000961F1DE43E2B1610951E941D28203D2920
+:10395000761A297612C040C051C0B22D76130806DF
+:10396000408D170B8801065E380AEE101BE44A08EA
+:103970005438B0A609661188140B44102B761B042A
+:10398000EE028B1614E44308DA1406EE020D8810DA
+:103990002A761E86131AE41C04EE020D66110866D0
+:1039A000022E76160D14141EE41A0D44110BD814B1
+:1039B0000866020A44022E76182E76102476172600
+:1039C000761FC084287619287611C76F0C24400F03
+:1039D00044111CE3FB26761D26761C2676152676DA
+:1039E000148A262676242676252976222E762028E5
+:1039F00076218E1888150DB91016E4278BC70D880F
+:103A0000110E5E39ADBB851904EE022676230988B6
+:103A100002861F89102876260A04480544110505E8
+:103A2000480E551105440204EE02851E841D2E76B3
+:103A3000272820069B2D29246A2E31172B12102EA1
+:103A40002538CC83C0D02D2407C0D7090840648016
+:103A50008E9A290928416480AA64E0B42D2406C006
+:103A60009809E9362D0AA02A628501C404ADAA2D61
+:103A700021042A668508DD11883F8E3E2732100812
+:103A8000EA1800C40408E8180088110ECE5308771D
+:103A900002C08308DD029D4118E401090D4E9840E3
+:103AA00088209A4397449D4517E3FE1DE3CB058884
+:103AB0001108EE02ADBDC08007EE029E4228D4CFB1
+:103AC0002AF29D87CA2AAC18B1772AF69D1AE3B963
+:103AD00097CA28A4A268711C655060C020D10F004D
+:103AE0002D2406C080C09809E9360E893863FF731B
+:103AF000C0A063FE481BE3CB1AE3EB2AB68963FF41
+:103B0000D600000065EF54C098C0D82D240663FF8E
+:103B1000522D2406C09063FF4ACC57DA20DB308C4C
+:103B200011580C51C020D10F00DA20C0B6580CE05B
+:103B300063FFE500DA20580CDE63FFDC2A2C748B6F
+:103B400011580551D2A0D10F6C10062820068A33D7
+:103B50006F8202600161C05013E39729210216E3CE
+:103B600096699204252502D9502C20159A2814E331
+:103B7000948F2627200B0AFE0C0477092B712064F2
+:103B8000E1398E428D436FBC0260016F00E104B0E9
+:103B9000C800881A08A80808D80298272B200668A9
+:103BA000B32ECE972B221E2C221D0111027BC901A0
+:103BB000C0B064B0172CB00728B000DA2003880A20
+:103BC00028824CC0D10B8000DBA065AFE7C020D1BC
+:103BD0000F2D206464DFCA8B29C0F10BAB0C66BFCC
+:103BE000C02B200C0CBC11A6CC28C2862E0A08784B
+:103BF000EB611EE3720EBE0A2EE2A368E0052822E6
+:103C0000007E894F29C2851EE37E6490461FE38CA7
+:103C10009E90C084989128200A95930F88029892CC
+:103C20008E200FEE029E942F200788262F950A984B
+:103C3000969A972E200625240768E3432921022A15
+:103C4000C2851DE3652AAC20ADBD25D4CF2AC6852B
+:103C500063FF4E002E2065CBEDC082282465C9F697
+:103C600005E4310002002A62821BE36D2941020B48
+:103C7000AA022A668209E43129210263FF23000097
+:103C800064DFB88F422E201600F1040DEE0C00EE1A
+:103C90001AAEAE9E2963FFA38A202B3221B1AA9AC5
+:103CA000B0293221283223B4992936217989A92BC8
+:103CB00032222B362163FFA0C020D10F9F2725245D
+:103CC00015ACB82875202B2006C0C12EBCFE64E0C0
+:103CD000AB68B7772DBCFD65DEC72D2064C0F064EE
+:103CE000D0868E290EAE0C66E089C0F128205A28B5
+:103CF0008CFE08CF3865FEE863FF580000E00493AF
+:103D000010C0810AF30C038339C78F08D80308A8B1
+:103D10000108F80C080819A83303C80CA8B82875BE
+:103D200020030B472B24158310CBB700E104B0BC54
+:103D300000CC1AACAC0CDC029C27659E5EC0B20BBA
+:103D4000990209094F29250263FE50002D206A0DB2
+:103D50002D4165DF7EDA20C0B0580CA864AF18C0D2
+:103D6000F163FEEF9F2763FFD02E221F65EE3263C3
+:103D7000FF79000028221F658E2763FF6E25240629
+:103D800029210263FE1B00006C10066571332B4C69
+:103D900018C0C7293C18C0A1C08009A8380808422B
+:103DA0006481101CE3011AE3022AC67E2A5CFDD35B
+:103DB0000F6DAA0500B08800908C8940C0A00988CA
+:103DC000471FE32B080B47094C50090D5304DD1026
+:103DD000B4CC04CC100D5D029D310CBB029B30882D
+:103DE000438E2098350FEE029E328D26D850A6DDE8
+:103DF0009D268E40C0900E5E5064E0971CE3111E1D
+:103E0000E300038B0BC0F49FB19EB02D200A99B341
+:103E10000CDD029DB28F200CFF029FB48E262D2058
+:103E2000079EB68C282DB50A9CB72924072F20069B
+:103E30002B206469F339CBB61DE2E22320168DD224
+:103E40000B330C00D10400331AB48DA3C393292281
+:103E5000200C13E2E11FE2D80C2E11AFEEA32229B1
+:103E600024CF2FE285D2A00FDD0B2DE685D10F00E8
+:103E70002E200CB48C0CEB111FE2D81DE2CFAFEE5C
+:103E8000ADBB22B28529E4CF02C20B22B685D2A0F7
+:103E9000D10F00002E200C1CE2C81FE2CF0CEB114A
+:103EA000AFEEACBB22B28529E4CF02820B22B685ED
+:103EB000D2A0D10FC0D00BAD387DC80263FEEC6339
+:103EC000FEE08E40272C747BEE12DA70C0B32C3CDF
+:103ED00018DD50580A9B8940C08063FEE3066E02DD
+:103EE000022A02DB30DC40DD505800049A10DB501F
+:103EF000DA70580465881063FEF700006C100692B3
+:103F0000121EE2B98C40AE2D0C8C472E3C1804CA10
+:103F10000BD9A07DA30229ADF875C302600084C04F
+:103F2000B0C023C0A09D106D0844B89F0EB80A8D84
+:103F3000900EB70BB8770D6D36ADAA9D800D660C4F
+:103F4000D8F000808800708C879068B124B2227706
+:103F5000D3278891C0D0CB879890279C1000708879
+:103F600000F08C9D91CB6FC08108BB0375CB36638D
+:103F7000FFB4B1222EEC1863FFD485920D770C8626
+:103F8000939790A6D67D6B01B1559693959260005C
+:103F900016B3CC2D9C188810D9D078D3C729DDF85A
+:103FA00063FFC100C0238A421BE2C000CD322D4412
+:103FB000029B3092318942854379A1051EE2BC0EF5
+:103FC000550187121BE2AB897095350B9902993226
+:103FD00088420A880C98428676A6A696768F44AFC9
+:103FE000AF9F44D10F0000006C10089311D63088A9
+:103FF00030C0910863510808470598389812282165
+:1040000002293CFD08084C6581656591628A630A56
+:104010002B5065B18B0A6F142E0AFF7CA60A2C2048
+:104020005ACCC42D0A022D245A7FE0026002158961
+:104030002888261FE29F09880C65820F2E200B0F0F
+:10404000EE0B2DE0FE2EE0FF08DD110EDD021EE27C
+:1040500099AEDD1EE2991CE2990EDD010DCC37C14F
+:1040600080084837B88DB488981089601AE2557B6B
+:1040700096218B622AA0219C147BA3179D132A20D2
+:104080000C8B108C20580BCA8C148D13DBA0CEAC7B
+:104090006001C4002E200C1BE2480CEA110BAA0898
+:1040A0002BA2861FE2467BDB3B0FEF0A2FF2A368B1
+:1040B000F0052822007F892C2BA28564B0AA876294
+:1040C0008826DE700C7936097A0C6FAD1C8F279B21
+:1040D0001508FF0C77F3197E7B729D139C149B15BA
+:1040E000CF56600025C0B063FFD0D79063FFDD00DE
+:1040F000009D139C14DA20DB70580B2F8B158C1449
+:104100008D1365A06A8E6263FFCC00DA208B11DC10
+:1041100040580AD5D6A08B15C051DE70DA20DC607D
+:10412000DD405BFF768D138C14D9A02E200C1BE292
+:10413000221FE2290CEA11AFEFC0E0ABAA2BA28547
+:104140002EF4CF0B990B29A68563FF1D00DA20DC26
+:1041500060DD40DE708912282007DF50A9882824FE
+:10416000075BFF09D2A0D10F00DBE0DA20580B502B
+:104170006550EF2A20140A3A4065A0EBDB60DC4072
+:10418000DD30022A025809BCD6A064A0D584A183E0
+:10419000A00404470305479512036351C05163FE11
+:1041A0005C2C2006D30F28CCFD6480A568C704C012
+:1041B000932924062C2006C0B18D641FE2019D279F
+:1041C0009D289D298FF29D2600F10400BB1A00F066
+:1041D00004B0BE0EDD01C0F0ADBB8D652F24070D10
+:1041E0000E5E01EE11AEBB2E0AFEB0BB0B0B190E1C
+:1041F000BB36C0E20B0B470EBB372B241618E1F978
+:104200000A09450D0B422B240B29240AB4BE2E2487
+:104210000C7D88572920162FCCFDB09D0A5C520DCD
+:10422000CC362C246465FDEC0C0C4764CDE618E11B
+:10423000E48E2888820C9F0C00810400FF1AAFEEE8
+:104240009E2963FDCF1CE21163FE13001CE20B6389
+:10425000FE0C8D6563FFA500DA202B200C580B396E
+:10426000645F0FC020D10F00C020D10FC09329245C
+:1042700016C09363FFA000006C1004C06017E1CD6E
+:104280001DE1D0C3812931012A300829240A78A1EF
+:1042900008C3B27BA172D260D10FC0C16550512654
+:1042A00025022AD0202F200B290AFB2B20142E2098
+:1042B0001526241509BB010DFF0928F1202B241414
+:1042C000A8EE2EF52064A0A92B221E28221D011184
+:1042D000027B8901DB6064B0172CB00728B000DADC
+:1042E0002007880A28824CC0D10B8000DBA065AF74
+:1042F000E7DB30DC40DD50DA205800DE29210209FE
+:104300000B4CCAB2D2A0D10F00CC5A2C30087BC1C2
+:10431000372ED02064E02D022A02033B02DC40DD70
+:10432000505800D4D2A0D10F2B2014B0BB2B241492
+:104330000B0F4164F0797CB7CAC0C10C9C022C25DC
+:1043400002D2A0D10FC020D10F2E200669E2C126D3
+:1043500024062B221E2F221D29200B2820150D9903
+:10436000092A9120262415AA882895207BF14960E6
+:104370000048B0BB2B24140B0A4164A0627CB70236
+:104380002C25022B221E2C221DD30F7BC901C0B06D
+:10439000C9B62CB00728B000DA2007880A28824C5A
+:1043A000C0D10B8000DBA065AFE7C020D10F0000BB
+:1043B000262406D2A0D10F0000DB601DE18164BF7E
+:1043C0004F2CB00728B000DA2007880A28824CC09A
+:1043D000D10B8000DBA065AFE71DE17963FF310001
+:1043E00026240663FF9C00006C1004282006260A81
+:1043F000046F856364502A2920147D9724022A02C1
+:10440000DB30DC40DD50580019292102090A4CC874
+:10441000A2C020D10FC0B10B9B022B2502C020D11E
+:104420000F00022A02033B022C0A015800D1C9AA3C
+:10443000DA20DB30DC40580A0C29A011D3A07E978B
+:10444000082C0AFD0C9C012CA411C0512D2014062F
+:10445000DD022D241463FFA4DA20DB30DC40DD50C4
+:10446000C0E0580987D2A0D10F0000006C100616DA
+:10447000E1521CE152655157C0E117E14E2821027B
+:104480002D220008084C6580932B32000B695129BE
+:104490009CFD6590872A629E6EA84C2A722668A0B1
+:1044A000027AD9432A629DCBAD7CBE502B200C0CE6
+:1044B000BD11A6DD28D2862F4C0478FB160CBF0A4E
+:1044C0002FF2A368F0052822007F89072DD285D31B
+:1044D0000F65D0742A210419E17AD30F7A9B2EDA62
+:1044E00020580883600035002D21041BE1757DBB39
+:1044F00024DA20C0B658087ECA546001030B2B5042
+:104500002B240BB4BB0B0B472B240C63FFA0DA202E
+:10451000580A67600006DA20C0B6580A656550E0A0
+:10452000DC40DB302D3200022A020D6D515808D2DA
+:104530001CE123D3A064A0C8C05184A18EA00404B0
+:10454000470E0E4763FF3500002B2104C08B8931D5
+:10455000C070DF7009F950098F386EB8172C2066CB
+:10456000AECC0C0C472C24667CFB099D105808E44B
+:104570008D1027246694D11EE126B8DC9ED06550AC
+:1045800056C0D7B83AC0B1C0F00CBF380F0F42CBFD
+:10459000F119E10518E10728967EB04BD30F6DBAEB
+:1045A0000500A08800C08C2C200CC0201DE10B0C45
+:1045B000CF11A6FF2EF285ADCC27C4CF0E4E0B2E09
+:1045C000F685D10FC0800AB83878D0CD63FFC1001E
+:1045D0008E300E0E4763FEA12A2C742B0A01044D67
+:1045E000025808D72F200C12E0FC0CF911A699A252
+:1045F000FF27F4CF289285D2A008480B289685D1B2
+:104600000FC020D10F0000006C1004C060CB55DB40
+:1046100030DC40055D02022A025BFF942921020979
+:10462000084CC882D2A0D10F2B2014B0BB2B24146D
+:104630000B0C41CBC57DB7EBC0C10C9C022C2502F5
+:10464000D2A0D10F0000022A02033B02066C02C076
+:10465000D0C7F72E201428310126250228240A0F5E
+:10466000EE012E241458010E63FFA300262406D267
+:10467000A0D10F006C1006282102D62008084C6536
+:10468000809D2B200C12E0CC0CB811A2882A8286C7
+:10469000B5497A930260009719E0C909B90A2992CD
+:1046A000A36890082A620009AA0C65A08228828566
+:1046B0001CE0D46480799C80B887B14B9B819B10AF
+:1046C000655074C0A7D970280A01C0D0078D380D75
+:1046D0000D42CBDE1FE0B51EE0B62EF67ED830D3FD
+:1046E0000F6D4A0500808800908C2E3008C0A00015
+:1046F000EE322E740028600C19E0B80C8D11A2DD8A
+:10470000A988C0202CD2852284CFD2A00CBC0B2C2F
+:10471000D685D10FC0F0038F387FA0C063FFB400EF
+:10472000CC582A6C74DB30DC4058080BC020D10F09
+:10473000DA605809DF63FFE7DD402A6C74C0B0DC43
+:104740007058087F2E30088B1000EE322E7400282F
+:10475000600C19E0A10C8D11A2DDA988C0202CD21B
+:10476000852284CFD2A00CBC0B2CD685D10F0000A3
+:104770006C1004292014282006B19929241468817A
+:1047800024C0AF2C0A012B21022C24067BA004C0DC
+:10479000D02D2502022A02033B02044C02C0D0584D
+:1047A00000C0D2A0D10FC020D10F00006C1004298E
+:1047B0003101C2B429240A2A3011C28378A16C7B4A
+:1047C000A1696450472C2006C0686FC562CA572D86
+:1047D00020147CD722DA20DB30DC40DD505BFFA5E3
+:1047E000292102090E4CC8E2C020D10FC0F10F9F51
+:1047F000022F2502C020D10FDA20DB30C0C05BFFC2
+:10480000DC28201406880228241463FFC7292015F9
+:104810001BE06C2A200BC0C09C240BAA092BA120F2
+:104820002C2415AB9929A52063FF9900C020D10F36
+:10483000DA20DB30DC40DD50C0E0580891D2A0D156
+:104840000F0000006C1004CB5513E06725221F0DEC
+:10485000461106550CA32326221E25261F06440BAF
+:1048600024261E734B1DC852D240D10F280A80C087
+:104870004024261FA82828261E28261DD240D10FF6
+:10488000C020D10F244DF824261E63FFD80000005D
+:104890006C1004D620282006C0706E85026000D4FB
+:1048A0001DE04E19E04612E0442A8CFC64A1302B36
+:1048B0006102B44C0B0B4C65B0A22B600C8A600CEF
+:1048C000B8110288082E828609B90A7EC3026000E8
+:1048D0009A2992A368900509AA0C65A08E28828562
+:1048E000648088B8891BE04A94819B80655155C0DB
+:1048F000B7B8382A0A01C0C009AC380C0C4264C0F1
+:10490000421FE0291EE02B2EF67EB04AD30F6DAA7F
+:104910000500808800908CC0A029600C0C9C11A21E
+:10492000CC2BC285AD990B4B0B2BC6852860062777
+:1049300094CF6881222D6015D2A0C9D2C0E22E6426
+:1049400006D10F00C0F008AF387FB0BD63FFB100E3
+:10495000276406D2A0D10F00D2A0D10F00CC57DA25
+:1049600060DB30DC405808C0C020D10FDA60580945
+:104970005063FFE80028221E29221DD30F789901D9
+:10498000C080C1D6C1C11BE018C122AB6B6480429C
+:1049900078913F2A80000CAE0C64E0BB02AF0C643F
+:1049A000F0B52EACEC64E0AF0DAF0C64F0A92EAC0A
+:1049B000E864E0A32FACE764F09D2EACE664E097DA
+:1049C0002F800708F80BDA807B83022A8DF8D8A0A5
+:1049D00065AFBC28612308D739D97060007B00001F
+:1049E0002B600C0CB811A2882C82862A0A087CAB9A
+:1049F0007E09BA0A2AA2A368A0052C62007AC96FB0
+:104A00002A828564A0691FDFFE276504C0E3C0C455
+:104A10002E64069CA11CE02B9FA02E600A97A30C7D
+:104A2000EE029EA28F600CFF029FA42E60147AEF0C
+:104A30004627A417ADBC2F828527C4CF2FFC202F7B
+:104A4000868563FE692A6C74C0B1DC90DD4058072E
+:104A5000BC1DDFE163FEC100D9A0DA60DB30C2D04B
+:104A6000C1E0DC4009DE39DD50580805D2A0D10F85
+:104A7000DA6058090F63FEE4290A0129A4170DBF63
+:104A8000082E828527F4CF2EEC202E868564500BCD
+:104A90002A6C74DB4058017CD2A0D10FC020D10F0A
+:104AA0006C10062B221E28221D93107B8901C0B09A
+:104AB000C0C9C03BC1F20406401DDFCBC0E2C074D8
+:104AC0000747010E4E01AD2D9E11C0402E0A146401
+:104AD000B06E6D084428221D7B81652AB0007EA13E
+:104AE0003B7FA1477B51207CA14968A91768AA1484
+:104AF00073A111C09F79A10CC18B78A107C1AE2908
+:104B00000A1E29B4007CA12B2AB0070BAB0BDAB02C
+:104B10007DB3022ABDF8DBA0CAA563FFB428B0109C
+:104B200089116987BB649FB863FFDC00647FB4634D
+:104B3000FFD50000646FD0C041C1AE2AB40063FF4E
+:104B4000C62B2102CEBE2A221D2B221E7AB12A8C10
+:104B5000107CB1217AB901C0B0C9B913DF96DA204F
+:104B600028B0002CB00703880A28824CC0D10B80E3
+:104B700000DBA065AFE7D240D10F8910659FD463F9
+:104B8000FFF300006C1008C0D0C8598C30292102F6
+:104B90000C0C4760000C8E300E1E5065E19E2921E2
+:104BA00002C0C116DF85090B4C65B0908A300A6ED1
+:104BB0005168E3026000852F629E1BDF7E6EF85312
+:104BC0002BB22668B0052E22007BE94727629DB7ED
+:104BD00048CB7F97102B200CB04E0CBF11A6FF299D
+:104BE000F2869E12798B4117DF7507B70A2772A3E9
+:104BF000687004882077893029F285DF90D7906526
+:104C000090652A210419DFAE7A9B22DA205806B873
+:104C1000600029002C21041BDFAA7CBB18DA20C00D
+:104C2000B65806B3C95860014CC09063FFCCDA2077
+:104C300058089F600006DA20C0B658089D655135B7
+:104C4000DC40DB308D30DA200D6D5158070BC0D0C1
+:104C5000D3A064A120292102C05184A18CA0040406
+:104C6000470C0C4763FF3E00C09B8831DBD008F83F
+:104C700050089B3828210498116E8823282066ACA0
+:104C80008C0C0C472C24667CBB159F139E148A1039
+:104C90008B1158071B8E148F13C0D02D24668A30B9
+:104CA000C092C1C81BDF5B7FA6099BF099F12CF471
+:104CB0000827FC106550A4B83ADF70C051C08007C7
+:104CC000583808084264806718DF3819DF392986A8
+:104CD0007E6A420AD30F6DE90500A08800F08CC0FF
+:104CE000A08930B4E37F9628C0F207E90B2C940822
+:104CF0009B909F912F200C12DF380CF811A6882969
+:104D00008285A2FF2DF4CFD2A009330B238685D153
+:104D10000F22200C891218DF300C2B11A6BBA82201
+:104D20002D24CF2CB285D2A00C990B29B685D10F9A
+:104D3000C087C0900A593879809663FF8ADB30DAE1
+:104D400020C0C1C0D05BFF56292102C0D02A9CFEE2
+:104D500065AE4D2D2502C09063FE45009E142A2CA1
+:104D600074C0B1DC70DD405806F68E14C0D01BDF75
+:104D700028C1C863FF6AC020D10F00006C1006284C
+:104D8000210217DF0D08084C65824929729E6F9831
+:104D90000260025019DF082A922668A0078B200AB9
+:104DA000BB0C65B23F2A729DC0CB64A2371DDF04E5
+:104DB000C0602B3008C0F164B0712E0AFFB0B86437
+:104DC00081512DBCFE64D0F364505C2A2C74044BDA
+:104DD000025800AD0AA2020600000000001ADF0817
+:104DE0002C20076EBB0260022218DEFE13DF081BB8
+:104DF000DF36C0E229200A9AD09ED1ABCB039902BC
+:104E000099D223B08026B480B13308330293D318EB
+:104E1000DEF20CFD11A7DD2CD285A8F82684CF0C7C
+:104E2000EC0B2CD685655FA2C020D10F2B21048806
+:104E300031DE6008F85008CE386EB8102C2066B10C
+:104E4000CC0C0C472C24667CEB026001AF2E30109A
+:104E50002930112C301300993200CB3264E1452AFD
+:104E600030141EDF1A00AA3278CF050E9C092BC41D
+:104E70007F1CDF1766A0050E98092A8480B4A71846
+:104E8000DF15C76F009104AC9CDBC000AE1A00F3C5
+:104E90001A6EC1048BD00BCB0C1CDF0F08B81C069C
+:104EA0003303AC882A848B2CD03627848C03CC0126
+:104EB0000ECC022CD4365801AD63FF0B2F200C0C06
+:104EC000FB11A7BB2DB286C0987D9302600121190A
+:104ED000DEBB09F90A2992A36890082D220009DD9A
+:104EE0000C65D10C2DB285DE6064D10488312B2194
+:104EF0000408F85008CE386FB80263FEDF2C206635
+:104F0000B1CC0C0C472C24667CE30263FECE9D10D2
+:104F100060013100293108292504283014B0886443
+:104F200080A62B31092B240AC0812B30162FD423C5
+:104F30002B240BB4BC2C240C8D378B36292504DE96
+:104F4000D00D8E39DCB00B8C390ECC0264CE7808D3
+:104F50009C1101C4048F380DBE1800C4040DB8188C
+:104F600000881108FF02C08308CC0218DECC9CA187
+:104F700098A018DECB8C209EA39FA405CC110BCF4C
+:104F800053C1E09EA50CFF0208FF029FA218DE8914
+:104F90002624662C729D2684A22CCC182C769D6328
+:104FA000FE250000002D30121CDECD00DA3278DF45
+:104FB000050C9E0B2AE47F66B0050C9F0B2BF4803A
+:104FC0002A301100AA3263FEEC2E240A2B31099BF1
+:104FD0002B63FF5300CC57DA20DB30DC405807222C
+:104FE000C020D10F00DA20C0B65807B163FFE5003A
+:104FF00000DBF0DA205807AE63FFD9000058064006
+:105000001DDE70C0F126246663FE41008B20280A55
+:10501000FFB1CE23200A2C21040E0E472E24077840
+:1050200031359AD02CD50A96D319DEA62ED416C0C7
+:105030008398D1C0E309B80298D409390299D226DD
+:10504000240763FDC958062E8D102624662B2104E3
+:105050002F200C63FD86000008B81119DE6808EEE9
+:1050600002882B9ED59AD0C0EF09880298D204C935
+:10507000110E990299D4C0E49ED163FFC1000000D3
+:105080006C1004C020D10F006C100485210D381164
+:1050900014DE478622A42408660C962205330B935F
+:1050A00021743B13C862D230D10FC030BC29992182
+:1050B00099209322D230D10F233DF8932163FFE34F
+:1050C0006C100AD620941817DE3CD930B8389819DD
+:1050D0009914655256C0E1D2E02E61021DDE390EF0
+:1050E0000E4C65E1628F308E190F6F512FFCFD65FC
+:1050F000F1558EE129D0230E8F5077E66B8F181E65
+:10510000DE78B0FF0FF4110F1F146590CE18DE7516
+:105110008C60A8CCC0B119DE2728600B09CC0B0D20
+:10512000880929812028811E2A0A0009880C08BACA
+:10513000381BDE6B0CA90A2992947B9B0260008CC1
+:105140002B600C94160CBD11A7DD29D286B84879C6
+:1051500083026000D219DE1909B80A2882A39817C1
+:105160006880026000A36000A51ADE5F84180AEE62
+:1051700001CA981BDE108C192BB0008CC06EB313C3
+:105180001DDE0D0C1C520DCC0B2DC295C0A17EDB7B
+:10519000AE6000380C0C5360000900000018DE51AE
+:1051A0008C60A8CCC0B119DE0328600B09CC0B0DB4
+:1051B000880929812028811E2A0A0009880C08BA3A
+:1051C000380CA90A2992947E930263FF72DA60C0B8
+:1051D000BA58073764507360026A00001ADDF68C13
+:1051E000192AA0008CC06EA31A18DDF20C1C5208FC
+:1051F000CC0B18DE3B2BC295C0A178B30263FF3FF6
+:1052000063FFC9000C0C5363FF0989607899182962
+:10521000D285C9922B729E1DDDE76EB8232DD22652
+:10522000991369D00B60000DDA60580721600017F0
+:105230000088607D890A9A1A29729D9C129915CF5F
+:1052400095DA60C0B658071A6551F98D148C18DBD1
+:10525000D08DD0066A020D6D51580587D3A09A14DF
+:1052600064A1E182A085A1B8AF9F1905054702029C
+:10527000479518C05163FE602B6104C08B8931C013
+:10528000A009F950098A386EB81F2C6066A2CC0CB0
+:105290000C472C64667CAB119F119E1B8A15580528
+:1052A000988E1B8F11C0A02A64669F1164F0E58957
+:1052B0001388190FFD022E0A006DD9172F810300E4
+:1052C000908DAEFE0080889F9200908C008088B800
+:1052D0009900908C65514E8A10851A8B301FDDC85D
+:1052E000881229600708580A2C82942D61040ECC7C
+:1052F0000C2C86946FDB3C1CDDF4AC9C29C0800B2D
+:105300005D50A29909094729C48065D0DA2E600C46
+:10531000C0D01FDDB10CE811AFEEA7882282852D29
+:10532000E4CF02420B228685D2A0D10F8E300E0E22
+:105330004763FDA2A29C0C0C472C64077AB6CD8B68
+:10534000602E600A280AFF08E80C64810E18DDDD73
+:1053500083168213B33902330B2C34162D350AC051
+:105360002392319F30C020923308B20208E80292A3
+:10537000349832C0802864072B600CD2A01CDD96C4
+:105380000CBE11A7EE2DE285ACBB28B4CF0D9D0B52
+:105390002DE685D10F8B1888138D30B88C0D8F4773
+:1053A0000D4950B4990499100D0D5F04DD1009FFEB
+:1053B000029F800DBB029B8165508D851AB83AC053
+:1053C000F1C0800CF83808084264806B1BDD771947
+:1053D000DD7829B67E8D18B0DD6DDA0500A0880075
+:1053E000C08CC0A063FEF30082138B161DDD8828DD
+:1053F000600AC0E02EC4800D880202B20B99239F80
+:1054000020C0D298229D2122600CB2BB0C2D11A786
+:10541000DD28D28508BB0B18DD702BD685A8222E7F
+:1054200024CFD2A0D10F9E1B851A2A6C748B185BD7
+:10543000FF168E1B63FEA300C087C0900AF938795F
+:10544000809263FF86C020D10F9E1B2A6C74C0B16E
+:105450008D1858053B8E1B851A63FE7E886B821360
+:10546000891608BE110ECE0202920B9E25B4991E1B
+:10547000DD639F200E88029822C0EF04D8110E88A9
+:10548000029824C0E49E21C080D2A02B600C286426
+:10549000071CDD510CBE11A7EE2DE285ACBB28B474
+:1054A000CF0D9D0B2DE685D10F0000006C1004C0C0
+:1054B00020D10F006C10048633C071C03060000131
+:1054C000B13300310400741A0462017460F1D10F29
+:1054D0006C1004022A02033B025BFFF61CDD391B41
+:1054E000DD83C79F88B009A903098A019AB0798032
+:1054F0001EC0F00FE4311DDD300002002BD2821EF1
+:10550000DD7C2AC1020EBB022BD6820AE431D10F08
+:1055100028C102C19009880208084F28C50208E482
+:1055200031D10F006C1004C0C00CE43112DD251A1B
+:10553000DD2200020029A28218DD701BDD6E26210B
+:10554000020B990108660129A68226250206E4318C
+:1055500014DD6B15DD66236A9023261685502426FC
+:1055600015252617222C50D10F0000006C1008D6EC
+:10557000102B0A64291AB41ADD0F0D23111CDD103B
+:105580000F2511B81898130E551118DD5DAC55A8EC
+:1055900038AA332C80FF2A80FEA933288D01298068
+:1055A0000108AA112880000CAA02088811098802A3
+:1055B00008AA1C288C0828160458086814DD010A5B
+:1055C000A70224411A2A30802B120407AA2858085F
+:1055D00063B1338B13B4559A6004AC28B4662C566F
+:1055E0002B7B69E016DD3A9412C050C0D017DCF472
+:1055F0009D15D370D4102F60802E60829F169E1749
+:10560000881672891A8D128C402A607F0DCC282B47
+:105610003A200CAA28580851C0B10ABE372E354886
+:105620008F1772F91A8D128C402A60810DCC282BAD
+:105630003A200CAA28580849C0B10ABE372E354A6C
+:10564000B233B444B1556952B6B466C0508F15B880
+:1056500077D370B2FF9F156EF899D10F6C1004C00C
+:1056600021D10F006C1004270A001CDCD31FDCE4DE
+:105670001EDCE71DDCD01ADD141BDD22C02824B09F
+:10568000006D2A75AA48288080C09164806100411D
+:105690000415DCCBC03125503600361A06550105FD
+:1056A00095390C56110C66082962966E974D0D5966
+:1056B0000A29922468900812DD0602420872993B7A
+:1056C00023629512DCC8CB349F300282020E440262
+:1056D000C092993194329233AD52246295C0902495
+:1056E0004C1024669524B0002924A0AA42292480C5
+:1056F000B177B14404044224B400D10FD10FD10FCB
+:105700006C10041ADCAC2AA00058021C5BFFD50206
+:105710002A02033B025BFFD11BDCAAC9A12CB10208
+:10572000C0D40DCC020C0C4F2CB5020CE431D10FBF
+:10573000C0A00AE43118DCA00002002F828219DC2C
+:10574000B32EB10209FF022F86820EE431D10F0081
+:105750006C1004C02002E43114DC9A16DC970002BD
+:1057600000226282234102732F0603E431C020D15C
+:105770000F19DCE61ADCE52841020A2A0109880132
+:105780002A668228450208E43115DCDC12DCE125BA
+:105790004621D10F6C1004292006289CF96480A0B2
+:1057A0002A9CFD65A0968A288D262F0A087AD9049E
+:1057B0002B221FC8BD2C206464C0812E22090EAE8E
+:1057C0000C66E0782B200C1EDC7C0CBC11AECC28C7
+:1057D000C28619DC7A78F3026000AD09B90A299211
+:1057E000A36890082E220009EE0C65E09B29C28573
+:1057F0001FDC846490929F90C0E41FDC919E9128EE
+:10580000200AC0E09E930F8802989288200F880299
+:1058100098942F20079A979D962F950A2E24072853
+:10582000200629206468833328C28512DC6B288C0B
+:1058300020A2B22E24CF28C685C020D10FC020D1EF
+:105840000F2A206A0111020A2A4165AF52DA20C0EC
+:10585000B05805EA64AFE5C021D10F00649FC81FAE
+:10586000DC582D20168FF209DD0C00F10400DD1A42
+:10587000ADAD9D2912DC5928C285A2B22E24CF28B5
+:105880008C2028C685C020D10FC021D10F00000078
+:105890006C1004260A001BDC9F15DC4928206517C4
+:1058A000DC46288CFE6480940C4D110DBD082CD272
+:1058B000F52BD2F42ED2F77CB13DB4BB2BD6F47BC2
+:1058C000E9052BD2F62BD6F47CB92C2AD2F62AD6AF
+:1058D000F52AD6F406E4310002002872822AFAFF83
+:1058E000004104290A012F510200991A0A9903095B
+:1058F00088012876820FE4312624652BD2F48E5C51
+:105900002CD2F5B0EE9E5C7BCB1629D2F62FD2F7C7
+:105910000CB80C09FF0C08FF0C0F2F14C8F960001D
+:10592000320BCA0C0A2A14CEA92B5102C0C20CBBDE
+:10593000020B0B4F2B55020BE431D10F00DB30DA99
+:10594000205BFF941BDC7464AF5D0C4D11ADBD6337
+:10595000FFA8000006E4310002002F728218DC303C
+:105960002E510208FF022F76820EE431D10F000083
+:105970006C1004C03003E43116DC1015DC11000299
+:105980000024628274472118DC64875C084801287F
+:105990006682CD7319DC620C2A11AA99229283299E
+:1059A00092847291038220CC292B51020BE431C0E6
+:1059B00020D10F001FDC5B2E51020FEE012E55028D
+:1059C0000EE431B02DB17C9C5C12DC5608DD112D4B
+:1059D000561DD10F6C10061BDBF71EDBF922B00041
+:1059E0001ADC526F23721DDC39C04818DC511FDCF1
+:1059F0004FDC10D5C083F000808600508A6D4A4F7E
+:105A00000F35110D34092440800B560A296294B1D8
+:105A1000330E55092251480F44110C440A8740099E
+:105A2000A80C02883622514907883608770CA899B5
+:105A30002966949740296295874109A80C02883607
+:105A400007883608770CA899296695974103034281
+:105A5000B13808084298F0D10F1CDC3613DC372728
+:105A6000B0002332B5647057C091C0D016DC351534
+:105A7000DC33C0402AC00003884328C4006D793C51
+:105A8000004104B14400971A7780148E502FB295CC
+:105A90002DB695AFEE2EED2006EE369E5060001826
+:105AA00077A00983509D5023B69560000223B295DC
+:105AB000223D2006223622B695B455B8BBD10F0040
+:105AC00003884328C400D10F6C1004C04004E431A3
+:105AD00015DC1D000200885013DC1CCB815BFFBD70
+:105AE0001CDC1B0C2D11ADCC2BC2822AC28394501E
+:105AF0007BAB142EC28429C2850ABD0C0E990C0DF5
+:105B0000990C0929146000050BA90C092914993076
+:105B100015DBAC2A51020AE4312A2CFC58004B2B2D
+:105B200032000AA2022BBCFF9B30CCB6C8A4D2A084
+:105B3000D10F000004E4311EDBA00002002DE28240
+:105B40002FBAFF2C51020FDD012DE6820CE431D17A
+:105B50000F0000006C1004D10F0000006C1004C096
+:105B600020D10F006C100413DBFAC0D103230923EA
+:105B7000318FC0A06F340260008D19DB8F1BDB906A
+:105B800017DBF30C2811A8772672832572822CFA72
+:105B9000FF76514788502E7285255C0425768275E4
+:105BA000E9052572842576827659292E72842E760F
+:105BB000822E76830AE431000200239282002104BF
+:105BC0002FB10200D61A0C66030633012396820F0A
+:105BD000E43126728325728260000200D8A07659D3
+:105BE000220AE43100020023928200210400D21A2A
+:105BF0002FB1020C22030232012296820FE431D22D
+:105C000080D10F00D280D10FC020D10F6C1004DBE7
+:105C100030862015DB68280A00282502DA2028B003
+:105C2000002CB00705880A28824C2D0A010B800041
+:105C3000DBA065AFE61ADB610A4A0A29A2A3C7BF47
+:105C4000769101D10F2BA6A3D10F00006C1004C0D8
+:105C5000D1C7CF1BDB5B19DB5817DB560C2811A80B
+:105C60007786758574C0A076516288508E77B4555A
+:105C7000957475E903857695747659278F769F75A7
+:105C80009F740AE431000200239282B42E2FB102E5
+:105C900000E10400D61A0C66030633012396820F36
+:105CA000E431867583747639280AE4310002002EC7
+:105CB0009282B42200210424B10200DF1A0CFF03F7
+:105CC0000FEE012E968204E431D280D10FD8A07657
+:105CD00051D6D280D10F00006C1004290A801EDB3F
+:105CE0005D1FDB5D1CDB350C2B11ACBB2C2CFC2DA4
+:105CF000B2850FCC029ED19CD0C051C07013DB592D
+:105D000014DB5818DB562AB285A82804240A234637
+:105D100091A986B8AA2AB685A98827849F25649F59
+:105D2000D10F00006C100419DB8B0C2A11A9A98972
+:105D300090C484798B761BDB79ABAC2AC2832CC2EE
+:105D4000847AC1688AA02BBC30D3A064A05E0B2BE0
+:105D50000A2CB2A319DB4268C0071DDB7FD30F7D7D
+:105D6000C94AA929299D0129901F68913270A6036B
+:105D7000D3A0CA9E689210C7AF2AB6A32A2CFC5B98
+:105D8000FFB3D230D10F000013DB7503A3018C31B8
+:105D90001DDB130C8C140DCC012CB6A363FFDC00AF
+:105DA000C020D10FDA205BFFCCC020D10FC020D1A2
+:105DB0000F0000006C1004DB30C0D019DAFEDA20CE
+:105DC00028300022300708481209880A28824CDC53
+:105DD000200B80001BDAF90C4A11ABAA29A2840916
+:105DE000290B29A684D10F006C1004C04118DAF2E7
+:105DF00017DAF40C2611A727277038A866256286C3
+:105E0000007104A35500441A75414822628415DBD1
+:105E10001502320BC922882117DAF10884140744CD
+:105E200001754905C834C020D10FD10F0809471D9D
+:105E3000DB4AC0B28E201FDADF0E0E43AFEC2BC45C
+:105E4000A00FEE0A2DE6242A6284C0200A990B29AD
+:105E50006684D10FC020D10F6C1004DB30C0D01885
+:105E6000DAD5DA2025300022300708580A28824C7B
+:105E7000DC200B80008931709E121BDACF0C4A1196
+:105E8000ABAA29A28409290B29A684D10F09C952DA
+:105E900068532600910418DACAC0A12F811600AAFF
+:105EA0001A0AFF022F85161EDAC40C4D11AEDD2C26
+:105EB000D2840C2C0B2CD684D10FC0811FDAC1B830
+:105EC0009A0A0A472EF11600A10400881A08EE0269
+:105ED0002EF5161DDAB90C4C11ADCC2BC2840B2B50
+:105EE0000B2BC684D10F00006C1004DB30C0D0191E
+:105EF000DAB1DA2028300022300709880A28824CDB
+:105F0000DC200B80001CDAAC0C4B11ACBB2AB28439
+:105F10000A2A0B2AB684D10F6C1004C04118DAA6E5
+:105F200016DAA80C2711A626266038A87225228624
+:105F3000006104A35500441A7541082222840232EC
+:105F40000BD10F00C020D10F6C100415DB050249E6
+:105F5000142956112452120208430F8811C07300ED
+:105F6000810400361A008104C78F00771A0877036E
+:105F7000074401064402245612D10F006C10066E2D
+:105F800023026000AC6420A7C0A0851013DADD16E0
+:105F9000DAF4C040A6AA2BA2AE0B19416490666841
+:105FA000915D68925268933C2AA2AA283C7F288C73
+:105FB0007F0A0A4D2980012880002AACF208881146
+:105FC0000988027589462B3D0129B0002BB00108D4
+:105FD00099110B99027A9934B8332A2A00B1447284
+:105FE00049B160004A7FBF0715DADF63FFB90000DF
+:105FF000253AE863FFB10000253AE863FFA90000F5
+:10600000250A6463FFA1C05A63FF9C0000705F080B
+:106010002534FF058C142C34FE70AF0B0A8D142E22
+:106020003D012AE4012DE400DA405BFD5063FFA747
+:10603000D10FD10F6C10041ADA6219DA5F1CDACAB8
+:106040001BDACBC080C07160000D00000022A438B4
+:10605000B1AA299C107B915F26928679C2156E6247
+:1060600062C0206D080AB12200210400741A764B28
+:10607000DB63FFEE2292850D6311032514645FCF6D
+:10608000D650032D436DD9039820B4220644146DD5
+:106090004922982098219822982398249825982678
+:1060A000982798289829982A982B982C982D982EDC
+:1060B000982F222C4063FF971EDA4027E68027E6C0
+:1060C00081D10F00C02063FF830000006C1004C06A
+:1060D00062C04112DA3B1ADA3713DA522AA00023DF
+:1060E000322D19DA9F2BACFE2992AE6EA30260000E
+:1060F0008E090E402D1AC2C2CD0EDC392C251A6431
+:10610000B0895BFF9E15DA9A1ADA952B3AE80A3ABB
+:10611000015805922B211A0ABB28D3A09B50580581
+:10612000A92B52000ABB082A0A005805A815DA91C3
+:106130002D21022C3AE80C3C2804DD022D25029C7E
+:10614000505805A08B50AABBC0A15805A01CDA8AE4
+:106150002D21020C3C2806DD0213DA882D25029C35
+:10616000305805988B30AABBC0A25805982A210246
+:10617000C0B40BAA020A0A4F2A25025805ACD10F57
+:10618000242423C3CC2C251A63FF760018DA801C44
+:10619000DA7C19DA7D1BDA7B17DA4F85202E0AFDAF
+:1061A0001FDA7C2D203624F47A24F47E24F4820E27
+:1061B000DD0124F4862E0AF707552806DD02C07596
+:1061C0000EDD01050506AB5BA959C0E8AC5C24C433
+:1061D000AB0EDD0227C4AC2E0ADFA85527B4EC0EA7
+:1061E000DD0124B4EBC2E027942C0EDD0224942BB5
+:1061F0002E0A800D0D4627546C24546B0EDD022DA3
+:10620000243663FEFC0000006C10042A0A302B0ABE
+:10621000035BFF4D12DA53C390292616C3A1C0B306
+:10622000C08A2826175BFF48C03CC3B12B26161A2C
+:10623000D9E42AA02023261764A079C3A2C0B15BA9
+:10624000FF42C3A2C0B15BFF40C3C22C2616C2AF3F
+:10625000C0B12326175BFF3CC28F282616C0FE2F35
+:106260002617C2E22E26162A0AA1C0B1C0D82D26B2
+:10627000175BFF352A0AA12A2616C3A6C0B3C1920E
+:106280002926175BFF31C3C62C2616C1B32A0AA2E2
+:106290002B2617C0B35BFF2C290AA2292616C1851D
+:1062A000282617C2FB2F2616C0E72E26171DDA391F
+:1062B0002D2610D10FC3A2C0B35BFF2363FF820062
+:1062C0006C10041CDA031BD9ED18DA3317DA341614
+:1062D000DA3415DA34C0E0C0D414D9FF1FD9B9C0FC
+:1062E000288FF06D2A36DAC0D9C07C5B020FC90C4A
+:1062F0001CD9F90C9C28A8C3A6C22A36802A25845A
+:10630000A4C2A7CC2D248C2B248A2B24872E248B4B
+:10631000B1BB2E369F2C369E2C369DB1AC1CD9D7E6
+:106320001BDA22C0286D2A33DAC0D9C07C5B020F89
+:10633000C90C1CD9E80C9C28A8C3A6C22A36802BFD
+:106340002584A4C2B1BBA7CC2D248C2E248B2A2457
+:106350008A2E369F2C369E2C369DB1ACC07919D929
+:10636000D81BDA1413DA121ADA1218DA1314D9D97C
+:1063700016DA1304F42812DA1204660C040506A2D5
+:1063800052A858AA5AA3539B3029A50027848AC033
+:1063900091C0A52A848C29848B17DA0B18DA0AA7F6
+:1063A0005726361D26361E2E361F16DA0813DA0833
+:1063B000A65504330C2826C82E75002D54AC2E5437
+:1063C000AB2E54AA2326E62326E52E26E7D10F007E
+:1063D0006C100613D99417D9E224723D2232937FB0
+:1063E0002F0B6D08052832937F8F0263FFF3C0C423
+:1063F000C0B01AD973C051D94004593929A4206EAC
+:1064000044020BB502C3281ED96EDDB025E4220577
+:106410002D392DE421C0501ED9EF19D9DF18D9DF4D
+:1064200016D9E11DD9ED94102A724517D9AB6DA983
+:106430004BD450B3557A5B17DF50756B071FD9608B
+:106440008FF00F5F0C12D9A302F228AE2222D68160
+:10645000D54013D9A0746B0715D95A855005450C42
+:10646000035328B145A73FA832A93322369D2236CF
+:106470009E2436802B369F2BF48B2CF48C14D969F8
+:1064800024424DC030041414C84C6D0806B13304C6
+:106490001414C84263FFF20015D947C44000310408
+:1064A0001AD948C0D193A200DD1AC138B0DD9DA32E
+:1064B00018D95D2B824D29824E29A5202882537A36
+:1064C000871E2C54008E106FE45D12D93D2F2121C0
+:1064D0002321202F251F04330C23252023251ED103
+:1064E0000FC06218D99F88807E87D98910265400F2
+:1064F0006F94191BD9332AB1200A1A1404AA0C2A42
+:10650000B5202AB5212AB51E2AB51FD10F1BD92CBB
+:106510002AB1200A1A1403AA0C2AB5202AB5212A66
+:10652000B51E2AB51FD10F001CD9262BC1212DC1A4
+:10653000202BC51F03DD0C2DC5202DC51ED10F003E
+:106540006C100619D91F14D98612D93615D9A3C7CC
+:106550003FC0E02E56A82E56A92E56AA2E56AB2383
+:10656000262918D946DB101CD99DC0D42A42452DB6
+:1065700016012C160000B0890A880C98905BFF94D5
+:106580002C22E318D90F0C5C149C842B22E48C84FD
+:10659000B1BB0B5B140CBB0C9B852A22E50A5A1479
+:1065A0002A86062922CD0959142986072F22892FE8
+:1065B00086095BFF435BFF1423463BC1B01ED90035
+:1065C0001DD9602AE1022D463A0BAA020A0A4F2A77
+:1065D000E5025804965BFEBD5BFE96C050C0B01647
+:1065E000D8F614D8FE17D96FC0C0C73E93122C2618
+:1065F0002DC0306000440000007F9F0FB155091985
+:1066000014659FF4C0500AA9027FA7EF18D8EADAF0
+:106610005008580A28822C2B0A000B8000005104D5
+:10662000D2A0C091C7AF00991A0A99039912CE3827
+:1066300064206BD3202B20072516032C12022A621C
+:10664000827CA86318D8DC01110208580A28822C21
+:10665000DA500B8000D2A0643FD58A310A8A140434
+:10666000AA01C82A2B22010B8B1404BB017BA9456C
+:10667000DDA07A7B081DD8D22DD2000DAD0CDB3009
+:1066800019D8CD1AD91488130ADA28DC801DD951FB
+:1066900009880A28823C0DAA080B8000652F93D335
+:1066A00020C0B063FF9400007FAF34B155005004A8
+:1066B0000A091963FF42DAB07B7B081AD8C12AA203
+:1066C000000ABA0C1BD9048C310BAB280C8A141CA1
+:1066D000D941ACBB1CD94104AA012BC68163FF8FF1
+:1066E000645F60C050C0B0C7CE9C1263FF5500000D
+:1066F0006C100427221EC08008E4311BD8AF0002B2
+:10670000002AB28219D8AF003104C06100661A298C
+:1067100091020A6A022AB68209E43115D90C0C38B2
+:1067200011A8532832822432842A8CFC7841102903
+:1067300021022A368297A0096902292502D10F0079
+:106740002B21022C32850B6B022CCCFC2C36829731
+:10675000C02B2502D10F00006C1004C0E71DD89299
+:106760001CD8940D4911D7208B228A200B4B0BD2B9
+:10677000A007A80C9B72288CF4C8346F8E026000AE
+:10678000A31FD88AA298AF7B78B334C93DC081C01B
+:10679000F0028F380F0F42C9FA2CD67ED5206D4AF1
+:1067A0000500308800508C887008980878B16DD248
+:1067B000A09870D10FC0F0038F387FE0DE63FFD860
+:1067C000027B0CAFBB0B990C643047D830C0F1C0D2
+:1067D0005002F5380505426450792CD67E0B3612EE
+:1067E0002F6C100F4F366DFA0500808800208C0644
+:1067F000440CC081C05003B208237C0C03853805CB
+:10680000054264505A2CD67ED30F6D4A050020886D
+:1068100000308CD2A0A798BC889870D10FD2A0BCB1
+:10682000799970D10FD2302BAD08C0F1C0500BF563
+:1068300038050542CB542CD67E083F14260A100F8B
+:10684000660C0646366D6A0500208800B08C8270A2
+:1068500063FF2D00C05003F53875E08063FF7A00B8
+:10686000C06002863876E09F63FF9900C05003F550
+:106870003875E0C463FFBE006C1004D62068520F68
+:10688000695324DA20DB30DC405800F7D2A0D10F66
+:10689000DA20DB30DC405800F49A2424240EC02196
+:1068A00022640FC020D10F00B83BB04C2A2C748951
+:1068B000242D200E2E200FA4DDB1EE2E240FB0DDEE
+:1068C0002D240E2890072D9003A488B088B1DD2DCB
+:1068D00094032894075BFFA069511DC0E082242A1D
+:1068E000600F18D8BF2A240329600E8F202924079F
+:1068F00008FF029F209E64D10FC020D10F0000002E
+:106900006C1004942319D8B7C0B3083A110BAA022B
+:10691000992019D8299A2116D827C05028929D2548
+:1069200064A2288C1828969DD10F00006C100428B2
+:106930002066C038232406B788282466D10F0000BB
+:106940006C10060D3C111AD819D820035B0C862256
+:106950000D55118221AA8902320B928105630C9395
+:10696000820C550C792B54CB531CD8111DD80FC059
+:10697000F7A256C031C0A0043A380A0A42769343BF
+:10698000044302C9AB2CD67ED30F6DBA0500208814
+:1069900000308C8281A25272917D92818382C83EA6
+:1069A000D10FC071C06002763876F0DB63FFD5008E
+:1069B000C020BC89998199809282D10F222DF892B2
+:1069C0008163FFA219D7FA02860CA9669611D940F5
+:1069D000063612961006BB0C64A0442CD67E8A1094
+:1069E000D30F6DAA0500208800908CBC828311C053
+:1069F000E0A433240A01034E380E0E42CAEC2CD612
+:106A00007E6DBA0500208800308C821102520CA2E3
+:106A100082BC22928163FF83BC82928163FF7C00EF
+:106A2000C06002363876F0B563FFAF00C070024731
+:106A30003877F0CC63FFC6006C100414D7EBC1525A
+:106A4000A424CA3128221D73811C292102659016B5
+:106A50002A300075A912022A02033B022C3007C01B
+:106A6000D25801D5653FDCD10F2B300703BB0B0B90
+:106A7000BA0274B3022ABDF8D3A063FFC4000000B9
+:106A80006C1004292006C0706E9741292102C08F26
+:106A90002A2014C0B62B240606AA022A24147980C0
+:106AA000022725022A221E2C221D7AC10EC8ABDA2B
+:106AB00020DB302C0A00033D025BF7F96450892D7E
+:106AC00021020D0D4CC9D3C020D10F00002E9CFB1C
+:106AD00064E0962F21020F0F4C65F0A51AD7B71E60
+:106AE000D7B529A29EC08A798B712BE22668B004A3
+:106AF0008C207BC96629A29D1FD7B264905D9790B8
+:106B0000C0C31DD7C62B21049D9608BB110CBB0228
+:106B10009B919B971CD7C3C08527E4A22BA29D28DD
+:106B200024068DFA282102B0DD2BBC302BA69D9DBA
+:106B3000FA0C8802282502C8D2C020D10F8EF91283
+:106B4000D7B92E2689C020D10F283000688938DABD
+:106B500020DB30DC4058004463FF6300022A022B34
+:106B60000A065800D3220A00D10F655010293000C0
+:106B7000689924022A02033B02DC4058003BC020F3
+:106B8000D10FD270D10F00002A2C74033B02044CA9
+:106B9000025BFEF163FF2700DB30DC402A2C745BD4
+:106BA000FEEEC020D10F00006C1004C83F8926887B
+:106BB00029A399992609880C080848282525CC522C
+:106BC000C020D10FDB402A2C745BF92FD2A0D10F4B
+:106BD0006C1004D820D73082220D451105220C926A
+:106BE0008264207407420B13D771D420A3837323CC
+:106BF00002242DF8858074514CBC82C0906D08161B
+:106C000000408800708C773903D720C0918680744B
+:106C10003901D42074610263FFE2CA98C097C04171
+:106C20001BD7F2C0A00B8B0C0B4A380A0A42C9AA28
+:106C30001DD75E1CD75F2CD67EC140D30F6D4A0591
+:106C400000208800308C9780D270D10FBC8FC0E0BC
+:106C50000F4E387E90E263FFD6BC8292819280C054
+:106C6000209282D10F0000006C1006C0D71CD74EB6
+:106C70001BD7500D4911D7202E221F28221D0E4E42
+:106C80000BD280078A0C2E761F2AAC80C8346FAED8
+:106C9000026000CB2F0A801AD754A29EAA7A7EA344
+:106CA0003FC93FC0E1C05002E538050542CA552B37
+:106CB000C67EDB20D30F6D4A0500308800B08C2ED5
+:106CC000721DAE9E0EA50C645086D2802E761DC01D
+:106CD00091298403D10FC05003E53875D0D363FFE9
+:106CE000CD15D741027E0CA5EE643051C0A1250A16
+:106CF0000002A538033A020505426450922BC67E75
+:106D00000E35129510255C10054536D30F6D5A05CA
+:106D100000A08800208CC0A1A3E2C05023FA800309
+:106D2000730C03A538AF730505426450722BC67E01
+:106D3000851005450C6D5A0500208800308CD280E6
+:106D4000C0A10E9B0CAB7BAFBB2B761D2A8403D15D
+:106D50000FD280C0C1AF7D2D761D2C8403D10F00D2
+:106D6000D2302E8D08C0F1C0500EF538050542CB4B
+:106D7000592BC67E0A3F14C1600F660C064636D3F7
+:106D80000F6D6A0500208800E08C22721D63FF03EE
+:106D9000C061C05003653875D80263FF6263FF5C51
+:106DA000C05002A53875D08763FF8100C06003F62C
+:106DB0003876D0BF63FFB9006C10042A2015292053
+:106DC0001614D6FF0A990CCB9D2E200B04ED092B2F
+:106DD000D1208F2809BC36ACAA0CBB0C2BD5200ABD
+:106DE0000A472A2415CAAF8B438942B0A8009104F0
+:106DF00000881AA8FF0FBB029B278F260FB80C78BC
+:106E00003B1AC020D10F0000292102C0A20A99021A
+:106E1000292502C021D10F008B2763FFDC2BD12055
+:106E20000CAA0C0A0A472A2415ACBB2BD520C9AEE4
+:106E30008B438C288F42B0AD00F10400DD1AADCC3D
+:106E40000CBB029B27DA20B7EB580019C021D10FE9
+:106E50009F2763FFEF0000006C100428203C643083
+:106E60004705306000073E01053EB156076539050C
+:106E70004928C77FA933030641076603B1660606A2
+:106E800041A6337E871E222125291AFC732B150269
+:106E9000380C09816000063E01023EB124064239E9
+:106EA00003220AD10FD230D10FC05163FFC00000BE
+:106EB0006C100427221EC08008E4311DD6BF0002DA
+:106EC000002CD2821BD6BF003104C06100661A2B91
+:106ED000B1020C6C022CD6820BE43119D7440C3A67
+:106EE00011AA932832829780253282243284B455A5
+:106EF00025368275410A292102096902292502D114
+:106F00000F2A21022B32830A6A022B36822A25029B
+:106F1000D10F00006C100418D6A80C2711087708B0
+:106F2000267286253C04765B1315D6A405220A2218
+:106F300022A3682002742904227285D10FC020D1B7
+:106F40000F0000006C100419D6A727221EC080096C
+:106F5000770208E4311DD6980002002CD2821BD69D
+:106F600098003104C06100661A2BB1020C6C022C2F
+:106F7000D6820BE43119D71D0C3A11AA932832821C
+:106F80009780253282243284B45525368275410B90
+:106F90002A21020A6A022A2502D10F002B21022C83
+:106FA00032830B6B022C36822B2502D10F0000009E
+:106FB0006C10041BD6810C2A11ABAA29A286B43806
+:106FC000798B221BD67E19D6A50B2B0A2BB2A309CF
+:106FD000290868B00274B90D299D0129901F6E928D
+:106FE0000822A285D10FC020D10FC892C020D10F96
+:106FF000DA205BEE88C020D10F0000006C10041472
+:10700000D66E28429E19D66B6F88026000BA29920C
+:10701000266890078A2009AA0C65A0AC2A429DC068
+:10702000DC64A0A42B200C19D6650CBC11A4CC2EBA
+:10703000C28609B90A7ED30260009A2992A3689099
+:10704000078D2009DD0C65D08C25C2856450862D06
+:107050002104C0306ED80D2C2066B8CC0C0C472C07
+:10706000246665C07B1CD6E218D66B1AD66219D688
+:10707000731DD667C0E49E519D508F209357935542
+:1070800099539A569A5408FF021AD6839F5288261B
+:107090009F5A9E599D58935E9C5D935C9A5B08082D
+:1070A00048058811985FC0D81FD64C0CB911A49917
+:1070B000289285AFBF23F4CF288C402896858E2652
+:1070C0002D24069E29C020D10FCA33DA20C0B65B1A
+:1070D000FF78C72FD10FC93ADA205BFF75C72FD1D0
+:1070E0000FDBD05BFE072324662B200C63FF7500AB
+:1070F000C72FD10FC72FD10F6C1004C85B292006F2
+:1071000068941C689607C020D10FC020D10FDA20E8
+:10711000DB30DC40DD502E0A005BFE59D2A0D10FDF
+:107120002E200C18D6250CEF11A8FF29F286C08856
+:10713000798B791AD6220AEA0A2AA2A368A0048BBC
+:10714000207AB96823F2856430621BD62C290A8024
+:107150002C20682820672D21040B881104DD1108DC
+:10716000DD020DCC02C0842D4A100DCC021DD624A8
+:1071700098319D308A2B99379C340BAA02C0C09C51
+:10718000359C369A322A2C74DB4028F285C0D328ED
+:107190008C2028F6852C25042D24061FD60FDD40D3
+:1071A000AFEE2CE4CF5BFDE6D2A0D10F00DA20DBFE
+:1071B000E05BFF3FC020D10F6C100AD6302A2006BA
+:1071C00024160128ACF86583862B2122C0F22A21DF
+:1071D00024CC572AAC010A0A4F2A25247ABB026024
+:1071E000037F2C21020C0C4C65C3192E22158D3205
+:1071F000C0910EDD0C65D39088381ED5EF64836B8B
+:107200008C37C0B8C0960CB9399914B49A9A120D3B
+:10721000991199138F6718D5EAC9FB2880217F83BC
+:10722000168B142C22002A200C5BFF61D4A064A3CF
+:10723000B38F6760002800002B200C89120CBA1154
+:10724000AEAA2CA2861DD5DD7C9B3E0DBD0A2DD29B
+:10725000A368D00488207D893024A28564436427F4
+:10726000212E07F73607F90C6F9D01D7F0DA20DBE6
+:1072700070C1C42D211F5BFEF889268827DDA00977
+:10728000880C7A8B179A10600006C04063FFCC0010
+:1072900000DA208B105BFEC88D1065A267C0E09EEF
+:1072A000488C649C498B658A669B4A9A4B97458FAC
+:1072B000677F7302600120CD529D10DA20DB302CF5
+:1072C00012015BFE698D10C051D6A08FA7C0C08A85
+:1072D00068974D9A4C8869896A984E994F8E6A8A48
+:1072E00069AE7E77EB01B1AA9E6A9A698B60C0A0F5
+:1072F0000B8E1477B701C0A1C091C08493159D1760
+:107300009516C0D025203CC030085801089338C0DD
+:1073100082083310085B010535400B9D3807DD10EE
+:107320000BAB100E19402A211F07991003DD020D27
+:10733000BB020553100933020A55112921250A2AD7
+:10734000140929140499110A99020933028A2B2974
+:1073500021040BAA021BD6270899110955020855CA
+:10736000020BAA029A408920881408991109880200
+:1073700019D5A61DD62109880298418B2A934695D6
+:107380004783150DBB0285168D179B448A65896658
+:10739000AACAA97C77CB01B1AA07FB0C9C669A65A7
+:1073A00088268E29AD87972607EE0C0E0E482E25CF
+:1073B000259B672B200C87131ED5800CB911AE9925
+:1073C000289285A78828968517D584C090A7BB29C1
+:1073D000B4CF871863FE3C008C60C0E0C091C0F061
+:1073E000C034C0B82A210428203C08AA110B8B0104
+:1073F000038301039F380B9B39C03208FF100388B9
+:1074000001089E380C881407EE100FEE0203880165
+:1074100008983905BF1029211F0ABB1107881008D9
+:10742000FF020BAA0218D57809291403AA022B21FE
+:107430002583200B2B1404BB110833110FBB020B47
+:1074400099028B148F2A0B33020833028B2B647042
+:10745000868868974D984C8769886A9341994697C2
+:107460004E984FC07077C701C0719A4718D5E30B8B
+:107470007C100CEC0208F802984418D5E00CBC0211
+:1074800008CC029C402A200C295CFEC0801FD54AF3
+:107490001CD5520CAE112B2124ACAAAFEEB0BB8F81
+:1074A000132CE28528A4CFAFCC2CE6852A22152BFD
+:1074B0002524B1AA2A26156490DBC9D28F262E2254
+:1074C000090DFF082F26060FEE0C0E0E482E25255F
+:1074D0006550E4C020D10F00C07093419F4499468D
+:1074E0009A4777C70A1CD5362CC022C0810C873832
+:1074F0001CD5C40B781008E80208B8020C88029862
+:107500004063FF8000CC57DA20DB608C115BFDD636
+:10751000292102689806689403C020D10F2B221EEF
+:10752000C0A029221D2A25027B9901C0B064BFE8B2
+:1075300013D5212CB00728B000DA2003880A28824E
+:107540004CC0D10B8000DBA065AFE763FFCA000031
+:1075500068A779DA20DB30DC40DD505BFEE7D2A0A3
+:10756000D10FC16DC19D29252C60000429252CD681
+:10757000902624672F2468DA20DB308C11DD502E12
+:107580000A805BFD3FD2A0D10FC168C1A82A252C7B
+:1075900063FFDD000000C8DF8C268B29ADCC9C2664
+:1075A0000CBB0C0B0B482B25252A2C74DB602C12F2
+:1075B000015BFD87D2A0D10F2A2C748B115BF6B230
+:1075C000D2A0D10FDA205BFE3A63FF3800DA20C088
+:1075D000B15BFE8A64ABF1655F352D2124B1DD2DF1
+:1075E000252463FF1FDA202B200C5BFE5663FF145B
+:1075F00012D5858220028257C82163FFFC12D581F3
+:1076000003E83004EE3005B13093209421952263D5
+:10761000FFFC000010D57D910092019302940311AC
+:10762000D554821001EA30A21101F031C04004E4C7
+:107630001600020011D5768210234A00032202921E
+:107640001011D540C021921004E4318403830282DA
+:1076500001810000D23001230000000010D56D919F
+:107660000092019302940311D543821001EA30A2E3
+:107670001101F131C04004E41600020011D564820A
+:107680001013D4E7032202921004E431840383022E
+:107690008201810000D330013300000010D55E91DB
+:1076A00000810165104981026510448103CF1F925A
+:1076B000019302940311D531821001EA30A2110125
+:1076C000F231C04004E41600020011D550821013BC
+:1076D000D4CF032202921004E43184038302820196
+:1076E000C010910391029101810000D43001430048
+:1076F00012D500C03028374028374428374828376B
+:107700004C233D017233ED03020063FFFC000000D7
+:1077100010D542910092019302940311D54082103A
+:10772000921011D4F28310032202921011D53D124F
+:10773000D5049210C04004E41600020011D5348232
+:107740001013D4EB032202921004E4318403830269
+:107750008201810000D53001530000006C10026EE0
+:10776000322FD620056F04043F04745B2A05440CB5
+:1077700000410400331A220A006D490D73630403AB
+:10778000660CB1220F2211031314736302222C0121
+:10779000D10FC83BD10F000073630CC021D10F0083
+:1077A0000000000044495630C020D10F6C10020088
+:1077B00040046B4C07032318020219D10F0203196E
+:1077C000C020D10F6C100202EA30D10F6C1002CC35
+:1077D0002503F03160000F006F220503F1316000D6
+:1077E000056F230503F231000200D10F6C1002CCAB
+:1077F0002502F030D10F00006F220402F130D10FCA
+:107800006F230402F230D10FC020D10F6C1002227E
+:107810000A20230A006D280E2837402837442837CD
+:107820004828374C233D01030200D10F6C1002029F
+:10783000E431D10F0A0000004368656C73696F2062
+:1078400046572044454255473D3020284275696CD3
+:1078500074204D6F6E204D61722020382031373AF0
+:1078600032383A3135205053542032303130206F85
+:107870006E20636C656F70617472612E61736963F1
+:1078800064657369676E6572732E636F6D3A2F68F6
+:107890006F6D652F66656C69782F772F66775F3718
+:1078A0002E392D6977617270292C205665727369A3
+:1078B0006F6E2054337878203030372E30612E3080
+:1078C00030202D20313030373061303010070A0041
+:0478D0000BDFE8756D
+:00000001FF
diff --git a/firmware/cxgb3/t3fw-7.4.0.bin.ihex b/firmware/cxgb3/t3fw-7.4.0.bin.ihex
deleted file mode 100644
index 38dda94..0000000
--- a/firmware/cxgb3/t3fw-7.4.0.bin.ihex
+++ /dev/null
@@ -1,1917 +0,0 @@
-:1000000060007400200380002003700000001000D6
-:1000100000002000E100028400070000E1000288E7
-:1000200000010000E0000000E00000A0010000006E
-:1000300044444440E3000183200200002001E0002A
-:100040002001FF101FFFD0001FFFC000E300043C91
-:100050000200000020006B741FFFC29020006BBCE8
-:100060001FFFC29420006BFC1FFFC29820006C7021
-:100070001FFFC29C200003C0C00000E43100EA3131
-:1000800000A13100A03103020002ED306E2A05000C
-:10009000ED3100020002160012FFDBC03014FFDA5F
-:1000A000D30FD30FD30F03431F244C107249F0D347
-:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
-:1000C000D30FD30F03431F244C107249F0D30FD327
-:1000D0000FD30F14FFCE03421F14FFCB03421F1296
-:1000E000FFCCC0302D37302D37342D37382D373CED
-:1000F000233D017233ED00020012FFC4C0302F37E0
-:10010000002F37102F37202F3730233D017233ED6A
-:1001100000020012FFBEC0302737002737102737F4
-:1001200020273730233D017233ED03020012FFB95F
-:1001300013FFBA0C0200932012FFB913FFB90C028F
-:1001400000932012FFB8C0319320822012FFB71312
-:10015000FFB7932012FFB715FFB316FFB6C030D715
-:100160002005660160001B00000000000000000088
-:10017000043605000200D30FD30F05330C6E3B1479
-:100180000747140704437631E604360505330C6F40
-:100190003BED00020012FFA615FFA3230A00D720A3
-:1001A000070443043E0505330C0747146F3BF00377
-:1001B000020012FFA1C03014FFA1D30FD30FD30F41
-:1001C0009340B4447249F2D30FD30FD30F14FF9B63
-:1001D000834014FF9B834012FF9B230A0014FF9A65
-:1001E000D30FD30FD30F9340B4447249F2D30FD33C
-:1001F0000FD30F14FF95834012FF95C92F832084DE
-:10020000218522BC22743B0F8650B4559630B433FE
-:100210007433F463FFE60000653FE1655FDE12FFC3
-:100220007C230A0028374028374428374828374C91
-:10023000233D017233ED03020000020012FF7AC079
-:1002400032032E0503020012FF7813FF819320C0B2
-:1002500011014931004831010200C00014FF7E0441
-:10026000D23115FF7D945014FF7D04D33115FF7CEE
-:10027000945014FF7C04D43115FF7C24560014FFE5
-:100280007B04D53115FF7B24560010FF7A03000054
-:10029000000000000000000000000000000000005E
-:1002A000000000000000000000000000000000004E
-:1002B000000000000000000000000000000000003E
-:1002C000000000000000000000000000000000002E
-:1002D000000000000000000000000000000000001E
-:1002E000000000000000000000000000000000000E
-:1002F00000000000000000000000000000000000FE
-:1003000000000000000000000000000000000000ED
-:1003100000000000000000000000000000000000DD
-:1003200000000000000000000000000000000000CD
-:1003300000000000000000000000000000000000BD
-:1003400000000000000000000000000000000000AD
-:10035000000000000000000000000000000000009D
-:10036000000000000000000000000000000000008D
-:10037000000000000000000000000000000000007D
-:10038000000000000000000000000000000000006D
-:10039000000000000000000000000000000000005D
-:1003A000000000000000000000000000000000004D
-:1003B000000000000000000000000000000000003D
-:1003C000000000000000000000000000000000002D
-:1003D000000000000000000000000000000000001D
-:1003E000000000000000000000000000000000000D
-:1003F00000000000000000000000000000000000FD
-:1004000000000000000000000000000000000000EC
-:1004100000000000000000000000000000000000DC
-:1004200063FFFC000000000000000000000000006E
-:100430000000000000000000000000001FFC0000A1
-:100440001FFC0000E30005C81FFC00001FFC0000AB
-:10045000E30005C81FFC00001FFC0000E30005C806
-:100460001FFFC0001FFFC000E30005C81FFFC00042
-:100470001FFFC018E30005C81FFFC0181FFFC018EA
-:10048000E30005E01FFFC0181FFFC290E30005E076
-:100490001FFFC2901FFFC290E30008581FFFC290C9
-:1004A0001FFFC58CE3000858200000002000016AEF
-:1004B000E3000B542000018020000180E3000CC009
-:1004C0002000020020000203E3000CC02000021CF8
-:1004D00020000220E3000CC420000220200002269D
-:1004E000E3000CC82000023C20000240E3000CD0D6
-:1004F0002000024020000249E3000CD42000024CFE
-:1005000020000250E3000CE02000025020000259BD
-:10051000E3000CE42000025C20000260E3000CF029
-:100520002000026020000269E3000CF42000026C4D
-:1005300020000270E3000D0020000270200002790C
-:10054000E3000D042000028C2000028CE3000D105B
-:100550002000029020000293E3000D10200002AC66
-:10056000200002B0E3000D14200002D0200002F2AF
-:10057000E3000D18200003B0200003B0E3000D3CA1
-:10058000200003B0200003B0E3000D3C200003B0C6
-:10059000200003B0E3000D3C200003B0200003B0B6
-:1005A000E3000D3C200003B020006D94E3000D3CFF
-:1005B00020006D9420006D94E3007720000000007F
-:1005C00000000000000000001FFC00001FFC0000F5
-:1005D0001FFFC5901FFFC67020006D9820006D980A
-:1005E000DEFFFE000000080CDEADBEEF1FFFC2A064
-:1005F0001FFCFE001FFFC0941FFFC5C0300000009D
-:10060000003FFFFF8040000010000000080FFFFFC8
-:100610001FFFC26D000FFFFF804FFFFF8000000033
-:1006200000000880B000000560500000600000007D
-:1006300040000011350000004100000010000001E2
-:100640002000000000001000400000000500000035
-:1006500080000019040000000000080010000005E0
-:10066000806000007000000020000009001FF800FA
-:100670008000001EA0000000F800000007FFFFFF40
-:100680000800000018000000010080014200000086
-:100690001FFFC21D1FFFC0DC000100806040000082
-:1006A0001A0000000C0000001000000A00003000DA
-:1006B000600008008000001C000100008000001A9B
-:1006C00080000018FC0000008000000100004000D5
-:1006D000030000008000040050000003FFFFBFFF84
-:1006E0001FFFC3D400000FFFFFFFF000000016D073
-:1006F0000000FFF7A50000001FFFC4B01FFFC4618A
-:100700000001000800000B20202FFF801FFFC455B0
-:1007100000002C00FFFEFFF800FFFFFF1FFFC57861
-:1007200000002000FFFFDFFF0000FFEF01001100CD
-:100730001FFFC3D21FFFC590FFFFEFFF0000FFFBAD
-:100740001FFFC6301FFFBEA0FFFFF7FF1FFFC064E3
-:100750000000FFFD1FFFC6200001FBD01FFFC5B03A
-:100760001FFFC6601FFFC591E0FFFE001FFFC5A071
-:10077000000080001FFFC53C1FFFC5B41FFFC068FD
-:100780001FFFC4D01FFCFFD8000100817FFFFFFFC7
-:10079000E1000600000027101FFCFE301FFCFE7069
-:1007A000E10002001FFFC5381FFFC5500003D090B5
-:1007B0001FFFC5642B5063802B5079802B50908095
-:1007C0002B50A6801FFFC4690100110F202FFE00CF
-:1007D00020300080202FFF000000FFFF0001FFF805
-:1007E0002B50B2002B50B208000100102B50B180EA
-:1007F0002B50B2802B50BA00000100112B50BD28A5
-:100800002B50BC802B50BDA020300000DFFFFE002D
-:100810005000000200C0000002000000FFFFF7F4DB
-:100820001FFFC06C000FF800044000000010000023
-:100830000C4000001C400000E00000A01FFFC5406D
-:100840001FFD00081FFFC5541FFFC5681FFFC57CA3
-:10085000E1000690E10006EC00000000000000004E
-:100860000000000000000000010000000000000087
-:100870000000000000000000201000402010004098
-:100880002010004020140080200C0000200C0000EC
-:10089000200C000020100040201400802014008054
-:1008A00020140080201800C0201C0100201C010022
-:1008B000201C010020200140201800C0201800C08A
-:1008C000201800C0201C0100201800C0201800C003
-:1008D000201800C0201C01002020014020200140E1
-:1008E00020200140202009402020094020200940EC
-:1008F0002020094020240980FFFFFFFFFFFFFFFFAA
-:10090000FFFFFFFF000000000000000000000000EB
-:100910000000000000000000200054902000536000
-:1009200020005490200054902000529C2000529CA3
-:100930002000529C200050DC200050DC200050D4CD
-:100940002000504020004EE820004CC820004A9C67
-:100950000000000000000000200054602000532C24
-:10096000200053D0200053D0200051842000518417
-:10097000200051842000518420005184200050CC5C
-:100980002000518420004E0820004C7820004A4866
-:10099000000000000000000020000BE820003A30BA
-:1009A000200004C02000463C20000BE0200041480D
-:1009B000200003F0200045FC20004A2420003E5483
-:1009C00020003D70200039AC2000383C200035ACC0
-:1009D0002000310C20003BCC20002D6C2000280092
-:1009E000200067182000238C2000206C2000201895
-:1009F00020001D04200018182000154820000E2C8F
-:100A000020000C2C2000110C200012F82000434084
-:100A100020003E0820000BF0200004C00000000071
-:100A200000000000000000000000000000000000C6
-:100A300000000000000000000000000000000000B6
-:100A400000000000000000000000000000000000A6
-:100A50000000000000000000000000000000000096
-:100A60000000000000000000000000000000000086
-:100A70000000000000000000000000000000000076
-:100A80000000000000000000000000000000000066
-:100A900000000000000000000000000032640000C0
-:100AA0000000000032640000640064006400640020
-:100AB00064006400640064000000000000000000A6
-:100AC0000000000000000000000000000000000026
-:100AD0000000000000000000000000000000000016
-:100AE0000000000000000000000000000000000006
-:100AF00000000000000000000000000000000000F6
-:100B000000001000000000000000000000000000D5
-:100B100000000000000000000000100000000000C5
-:100B200000000000000000000000000000432380DF
-:100B300000000000000000000000000000000000B5
-:100B400000000000000000000000000000000000A5
-:100B500000000000005C94015D94025E94035F94C9
-:100B60000043000000000000000000000000000042
-:100B70000000000000000000000000000000000075
-:100B80000000000000000000000000000000000065
-:100B900000000000005C90015D90025E90035F9099
-:100BA00000530000000000000000000000000000F2
-:100BB0000000000000000000000000000000000035
-:100BC0000000000000000000000000000000000025
-:100BD00000000000009C94001D90019D94029E94D2
-:100BE000039F94040894050994060A94070B940043
-:100BF00043000000000000000000000000000000B2
-:100C000000000000000000000000000000000000E4
-:100C100000000000009C90019D90029E90071D9096
-:100C2000039F90047890057990067A90077B900056
-:100C30005300000000000000000000000000000061
-:100C400000000000000000000000000000000000A4
-:100C50000000000000DC94001D9001DD9402DE9491
-:100C600003DF94040494050594060694070794088A
-:100C700008940909940A0A940B0B9400430000009D
-:100C80000000000000000000000000000000000064
-:100C90000000000000DC9001DD9002DE900B1D9052
-:100CA00003DF9004B49005B59006B69007B790089E
-:100CB000B89009B9900ABA900BBB9000530000009D
-:100CC00063FFFC0020006B5010FFFF0A00000000D3
-:100CD00020006B7400D23110FFFE0A0000000000FB
-:100CE00020006BBC00D33110FFFE0A0000000000A2
-:100CF00020006BFC00D43110FFFE0A000000000051
-:100D000020006C7000D53110FFFE0A0000000000CA
-:100D100063FFFC00E00000A012FFF7822002825770
-:100D2000C82163FFFC12FFF303E83004EE3005C076
-:100D30003093209421952263FFFC00001FFFD00018
-:100D4000000400201FFFC5901FFFC670200A00117D
-:100D5000FFFB13FFFB03E63101020016FFFA17FF4A
-:100D6000FAD30F776B069060B4667763F85415B5C5
-:100D7000541A610F140063FFF90000006C1004C0E6
-:100D800020D10F006C1004C0C71AEF06D830BC2B5E
-:100D9000D72085720D4211837105450B9572023380
-:100DA0000C2376017B3B04233D089371A32D12EEA7
-:100DB000FE19EEFEA2767D632C2E0A000882022820
-:100DC0000A01038E380E0E42C8EE29A67E6D4A0532
-:100DD00000208800308C8271D10FC0F0028F387FE4
-:100DE000C0EA63FFE400C0F1C050037E0CA2EE0E27
-:100DF0003D1208820203F538050542CB5729A67E2D
-:100E00002FDC100F4F366DFA0500208800308CBCA7
-:100E100075C03008E208280A01058338030342C977
-:100E20003E29A67E0D480CD30F6D8A050020880050
-:100E3000B08C8271D10FC05008F53875C0C163FF06
-:100E4000BBC06002863876C0DA63FFD46C1012161D
-:100E5000EED8C1F9C1E8C1C72B221E28221DC0D07F
-:100E60007B81312920060BB702299CFA655008289E
-:100E70002072288CFF28247264915C2AB0000CA890
-:100E80000C6481670EA90C6492B37FA13769AC2F03
-:100E90006000340000282006D7D0288CFACC572ACE
-:100EA00020722AACFF2A24726481352AD0000CA952
-:100EB0000C6491640EAC0C64C31B7FA10768AC0783
-:100EC000C020D10F002D25028A32C0900A6E5065D5
-:100ED000E5B5292467090F4765F5B12C200C1FEEF5
-:100EE000B50CCE11AFEE29E286B4487983026005D5
-:100EF0008219EEB109C90A2992A36890078F2009C7
-:100F0000FF0C65F56E2FE28564F56865559628221D
-:100F10001D7B8105D9B060000200C0908B9417EE54
-:100F2000A70B881487740B0B47A87718EEA509BB8D
-:100F30001008770297F018EEA317EEA408A8010B8B
-:100F400088020747021BEEA097F10B880298F22750
-:100F500090232B902204781006BB1007471208BB81
-:100F6000022890210777100C88100788020B88024E
-:100F700017EE988B3307BB0187340B880298F397E1
-:100F80009997F48B9587399BF588968B3898F688D6
-:100F90009797F99BF898F717EE8F28E28507C7080F
-:100FA0002D74CF08480B28E68565550F2B221E2887
-:100FB000221D7B89022B0A0064BF042CB00728B0D5
-:100FC00000DA2006880A28824CC0D10B8000DBA002
-:100FD00065AFE763FEE90000292072659E9C60040E
-:100FE000E72A207265AEC36004DE00002EB0032C39
-:100FF0002067D4E065C1058A328C330AFF500C4566
-:1010000054BC5564F4EB19EE74882A09A9010988C7
-:101010000C64821FC0926000DD2ED0032A2067D4AA
-:10102000E065A0D88A328B330AFC500B4554BC557E
-:1010300064C4BE19EE69882A09A9017989D50BEA29
-:101040005064A4E30CEE11C0F02F16132E16168A6E
-:10105000E78CE82A16128EE9DFC0AAEA7EAB01B15E
-:10106000CF0BA8506583468837DBC0AE89991E78C0
-:101070009B022BCC012B161B29120E2B0A002916C2
-:101080001A7FC3077FC9027EAB01C0B165B49D8BD7
-:10109000352F0A002A0A007AC30564C3CB2F0A0140
-:1010A00065F4892B12162B1619005104C0C100CC0F
-:1010B0001A2CCCFF2C16170CFC132C16182B121AFA
-:1010C0002A121BDC50581974C0D0C0902E5CF42C2E
-:1010D00012172812182F121B2A121A08FF010CAA25
-:1010E000018834074C0AAB8B2812192BC6162F86A1
-:1010F000082A86092E74102924672E70038975B179
-:10110000EA2A7403B09909490C659DB32B20672D19
-:10111000250265B3FA2B221E2C221D7BC901C0B00B
-:1011200064BD9C2CB00728B000DA2006880A28820B
-:101130004CC0D10B8000DBA065AFE763FD8189BAAD
-:10114000B19965909788341CEE2598BA8F331EEEBE
-:101150001E0F4F542FB42C8D2A8A320EDD020CAC98
-:10116000017DC9660A49516F92608A3375A65B2C6E
-:10117000B0130AED510DCD010D0D410C0C417DC98F
-:10118000492EB012B0EE65E3C6C0D08E378CB88A57
-:10119000368FB97CA3077AC9027EFB01C0D1CED9B4
-:1011A00088350AAD020E8E0878EB022DAC0189B7A6
-:1011B000DAC0AF9B79BB01B1CADCB0C0B07DA30778
-:1011C0007AD9027CEB01C0B164B161C09129246776
-:1011D000C020D10F00008ADAB1AA64A0C02C206719
-:1011E0002D250265C3111DEDF88A321EEDFD0DADF2
-:1011F000010EDD0C65D28A0A4E516FE20260028157
-:10120000C090292467090F4765F2F828221D7B89C1
-:10121000022B0A0064BCA82CB00728B000DA200614
-:10122000880A28824CC0D10B8000DBA065AFE76341
-:10123000FC8D00000CE9506492ED0CEF11C0802889
-:101240001611AFBF2F16198EF88BF7DAE08FF92B36
-:101250001610ABFB7FBB01B1EA0CA8506580D688A5
-:1012600037DCE0AF89991C789B022CEC012C161B13
-:1012700029120C2C0A0029161A7AE3077AE9027F50
-:10128000BB01C0C165C2A58B352C0A002A0A007AB1
-:10129000E30564E1CA2C0A0164CE0D60028E883435
-:1012A0001BEDCF98DA8F331EEDC80F4F542FD42C7F
-:1012B0008C2A8A320ECC020BAB010CBB0C65BF0A28
-:1012C0000A49516E920263FF018A330AAB5064BE31
-:1012D000F92CD0130AEE510ECE010E0E410C0C412A
-:1012E0000ECC0C65CEE42FD012B0FF65F26EC0B00C
-:1012F0008E378CD88A362FD2097CA3077AC9027E12
-:10130000FB01C0B165BEC38835DBA0AE8E78EB01B2
-:10131000B1AB89D7DAC0AF9D79DB01B1CAC0C07B60
-:10132000A3077AB9027DEB01C0C165CE9DC09029AB
-:101330002467C020D10F88378C3698140CE90C290B
-:10134000161408F80C981D78FB07281214B088288A
-:101350001614891D9F159B16C0F02B121429161AFE
-:101360002B161B8B147AE30B7AE90688158E1678F8
-:10137000EB01C0F165F1BA29121A2F12118A352E2C
-:10138000121B9A1AAFEE2F1210C0A0AF9F79FB016B
-:10139000B1EE9F11881AC0F098107AE30A7EA90571
-:1013A0002A12017A8B01C0F164F0816001838936D1
-:1013B0008B3799170BE80C981F09C90C291615785B
-:1013C000EB07281215B088281615D9C09A199E184F
-:1013D0008A1F2E12152A161A2E161BDAC0C0E08C90
-:1013E000177F930B7FA90688188F1978FB01C0E13E
-:1013F00065E13E29121A2F12138A352E121B9A1BF1
-:10140000AFEE2F1212C0A0AF9F79FB01B1EE9F1378
-:10141000881BC0F098127AE30A7EA9052A12037A83
-:101420008B01C0F165F10A2E12162E16192A121B15
-:10143000005104C0E100EE1AB0EE2E16170EFF1395
-:101440002F16180FCC01ACAA2F121A0EBC01ACFC3F
-:101450007FCB01B1AA2A161B2C161A63FC5E000072
-:101460007FB30263FE3163FE2B7EB30263FC306305
-:10147000FC2A00006450C0DA20DBC0581648C020A7
-:10148000D10FC09163FD7A00C09163FA44DA20DB8A
-:1014900070C0D12E0A80C09A2924682C7007581574
-:1014A00038D2A0D10F03470B18ED4FDB70A8287876
-:1014B00073022B7DF8D9B063FA6100002A2C74DB2B
-:1014C00040580EB363FAE4000029221D2D25027B4B
-:1014D0009901C0B0C9B62CB00728B000DA20068840
-:1014E0000A28824CC0D10B8000DBA065AFE7C0208A
-:1014F000D10FC09163FBFF00022A025802440AA2E6
-:1015000002060000022A025802410AA20206000056
-:10151000DB70DA20C0D12E0A80C09E2924682C708E
-:1015200007581517C020D10FC09463FBC9C096633C
-:10153000FBC4C09663FBBF002A2C74DB30DC405B2D
-:10154000FE11DBA0C2A02AB4002C200C63FF2700F0
-:101550008D358CB77DCB0263FDD263FC6D8F358EEC
-:10156000D77FEB0263FDC563FC6000006C1004C014
-:1015700020D10F006C1004C020D10F006C10042B80
-:10158000221E28221DC0A0C0942924062A25027BE1
-:101590008901DBA0C9B913ED06DA2028B0002CB010
-:1015A0000703880A28824CC0D10B8000DBA065AFFE
-:1015B000E7C020D10F0000006C10042C20062A2167
-:1015C0000268C80528CCF965812E0A094C6591048A
-:1015D0008F30C1B80F8F147FB00528212365812774
-:1015E00016ECF529629E6F98026000F819ECF1295B
-:1015F00092266890078A2009AA0C65A0E72A629DB6
-:1016000064A0E12B200C0CB911A6992D92866FD9FC
-:10161000026000DB1DECE90DBD0A2DD2A368D007E6
-:101620008E200DEE0C65E0C7279285C0E06470BF88
-:101630001DECEE68434E1CECED8A2B0CAA029A704E
-:1016400089200899110D99029971882A98748F320E
-:101650009F75282104088811987718ECDE0CBF11BB
-:10166000A6FF2DF285A8B82E84CF2DDC282DF68577
-:10167000C85A2A2C74DB40580E46D2A0D10FC02085
-:10168000D10F00000029CCF96490B12C206689317B
-:10169000B1CC0C0C472C24666EC60260008509F89C
-:1016A0005065807F1CECD38A2B0F08400B881008F4
-:1016B000AA020CAA029A7089200899110D99029920
-:1016C00071883398738C329C728A2A9A74893499FF
-:1016D0007563FF7D00CC57DA20DB30DC4058151DE8
-:1016E000C020D10F00DA20C0B65815AC63FFE5006A
-:1016F000DA205815AA63FFDC00DA20DB30DC40DD9D
-:1017000050581638D2A0D10FC858DA20DB30581400
-:101710008A2A210265AFBDC09409A9022925026366
-:10172000FFB200002B21045814351DECAFC0E02E91
-:1017300024668F302B200C0F8F1463FF662921380D
-:10174000C08879830263FF5B2C20662B2104B1CC17
-:101750000C0C472C24665814291DECA3C0E02E2441
-:10176000668F302B200C0F8F1463FF376C1004C072
-:10177000B7C0A116ECA015EC92D720D840B822C073
-:10178000400535029671957002A438040442C94B95
-:101790001AEC8519EC8629A67EC140D30F6D4A0547
-:1017A00000808800208C220A88A272D10FC05008C5
-:1017B000A53875B0E363FFD76C1006931394112915
-:1017C0002006655288C0716898052A9CF965A29820
-:1017D00016EC792921028A1309094C6590CD8AA05B
-:1017E0000A6A512AACFD65A0C2CC5FDB30DA208CDE
-:1017F000115814D8C0519A13C7BF9BA98E132EE25B
-:101800000968E0602F629E1DEC6A6FF80260008438
-:101810002DD22668D0052F22007DF9782C629DC735
-:101820009064C0709C108A132B200C2AA0200CBD41
-:1018300011A6DD0A4F14BFA809880129D286AF88F6
-:10184000288C09798B591FEC5C0FBF0A2FF2A36813
-:10185000F0052822007F894729D285D490659075AC
-:1018600060004300002B200C1FEC540CBD11A6DDC2
-:1018700029D2860FBF0A6E96102FF2A368F0048853
-:10188000207F890529D285659165DA20581543C9DD
-:101890005C6001FF00DA20C0B658154060000C0003
-:1018A000C09063FFB50000DA2058153C6551E48D07
-:1018B000138C11DBD08DD0022A020D6D515813AD5F
-:1018C0009A1364A1CEC75F8FA195A9C0510F0F478E
-:1018D0009F1163FEFD00C091C0F12820062C2066F8
-:1018E000288CF9A7CC0C0C472C24666FC6098D13E5
-:1018F0008DD170DE02290A00099D02648159C9D385
-:101900008A102B21045813BD8A13C0B02B24662ED5
-:10191000A2092AA0200E28141CEC338D1315EC27E5
-:10192000C1700A773685562DDC28AC2C9C12DED08F
-:10193000A8557CD3022EDDF8D3E0DA40055B02DC4B
-:10194000305BFF8AD4A028200CB455C0D02B0A8865
-:101950002F0A800C8C11A6CC29C285AF3FAB9929E8
-:10196000C6851CEC1CDEF0AC882D84CF2812022921
-:10197000120378F3022EFDF8289020D3E007880C9C
-:10198000C170080847289420087736657FAB891313
-:1019900013EC1A8990C0F47797491BEC18C1CA2838
-:1019A00021048513099E4006EE1187530488118592
-:1019B000520E88020C88029BA09FA18F2B9DA59898
-:1019C000A497A795A603FF029FA22C200C1EEC0152
-:1019D000AECE0CCC1106CC082BC2852DE4CF2BBC8F
-:1019E000202BC6852A2C748B11580D69D2A0D10FDB
-:1019F00028203DC0E07C877F2E24670E0A4765A023
-:101A00007B1AEBFF88201EEBED8F138EE48FF4081A
-:101A100088110A88020F8F14AFEE1FEBFA98910F0E
-:101A2000EE029E901EEBF9C0801AEBEA2CD285AA3A
-:101A3000BAB8CC28A4CF2CD6852C21022F20720E28
-:101A4000CC02B1FF2F24722C2502C020D10F8713A6
-:101A5000877007074763FD6E282138C099798B028C
-:101A600063FE9ADDF063FE9500DA20DB308C11DD39
-:101A70005058155CD2A0D10FC0E163FF7A8B138C54
-:101A800011DD50C0AA2E0A802A2468DA205813BC1F
-:101A9000D2A0D10FC020D10F6C1006292102C0D0D6
-:101AA0007597102A32047FA70A8B357FBF052D2535
-:101AB000020DD902090C4C65C18216EBBE1EEBBCAF
-:101AC00028629EC0FA78F30260018829E2266890B5
-:101AD000078A2009AA0C65A17A2A629DDFA064A169
-:101AE000772B200C0CBC11A6CC29C286C08C798324
-:101AF0000260015719EBB109B90A2992A36890074E
-:101B0000882009880C65814327C2851CEBB364716A
-:101B10003A8931098B140CBB016FB11D2C20669FD3
-:101B200010B1CC0C0C472C24666EC6026001400933
-:101B3000FF5065F13A8A102AAC188934C0C47F97E7
-:101B40003C18EBB31BEBB28F359C719B708B209DC7
-:101B50007408BB029B72C08298751BEBAE0F0840E5
-:101B60009B730F881198777FF70B2F2102284A006B
-:101B700008FF022F2502C0B4600004000000C0B0BE
-:101B80007E97048F362F25227D97048837282521BC
-:101B90007C9736C0F1C0900AF9382F3C20090942E1
-:101BA00064908619EB8018EB8128967E00F08800FF
-:101BB000A08C00F08800A08C00F08800A08C2A6225
-:101BC0009D2DE4A22AAC182A669D89307797388F1C
-:101BD000338A3218EB8A07BE0B2C2104B4BB04CC29
-:101BE0001198E0C08498E1882B9DE59AE69FE71A5A
-:101BF000EB82099F4006FF110FCC020A880298E28F
-:101C0000C1FC0FCC022CE604C9B82C200C1EEB71D1
-:101C10000CCA11AECC06AA0829A2852DC4CF09B9D9
-:101C20000B29A685CF5CC020D10FC081C0900F8941
-:101C300038C08779880263FF7263FF6600CC57DA89
-:101C400020DB30DC405813C3C020D10FDA205814F9
-:101C50005363FFE8C0A063FE82DA20C0B658144F79
-:101C600063FFD900DB402A2C74580CC9D2A0D10FD5
-:101C70008A102B21045812E11EEB4EC0D02D246691
-:101C800063FEB1006C1006D62019EB491EEB4B2801
-:101C9000610217EB4808084C65805F8A300A6A5178
-:101CA00069A3572B729E6EB83F2A922668A0048CB7
-:101CB000607AC9342A729D2C4CFECAAB2B600CB6DC
-:101CC0004F0CBD11A7DD28D2860EBE0A78FB269CDC
-:101CD000112EE2A32C160068E0052F62007EF91594
-:101CE00022D285CF2560000D00DA60C0B658142BD3
-:101CF000C85A60010F00DA60581428655106DC40AC
-:101D0000DB308D30DA600D6D5158129AD3A064A08B
-:101D1000F384A1C05104044763FF6D00C0B02C6080
-:101D2000668931B1CC0C0C472C64666FC602709684
-:101D30000A2B61045812B1C0B02B64666550B42AF6
-:101D40003C10C0E7DC20C0D1C0F002DF380F0F42EA
-:101D500064F09019EB1418EB1528967E8D106DDA4F
-:101D60000500A08800C08CC0A089301DEB247797A7
-:101D70005388328C108F3302CE0BC02492E1226143
-:101D8000049DE00422118D6B9BE59FE798E61FEB15
-:101D90001A0998400688110822020FDD02C18D9DA4
-:101DA000E208220292E4B4C22E600C1FEB0A0CE897
-:101DB00011A7882C8285AFEE0C220B2BE4CF228654
-:101DC00085D2A0D10F28600CD2A08C1119EB020C87
-:101DD0008D11A988A7DD2ED2852B84CF0ECC0B2C9C
-:101DE000D685D10FC0F00ADF387FE80263FF6C634D
-:101DF000FF6000002A6C74C0B2DC20DD4058128FF6
-:101E0000C0B063FF63C020D10F0000006C10042C31
-:101E1000221D2A221EC049D320293006243468C03E
-:101E2000407AC105DDA060000200C0D06E9738C0C6
-:101E30008F2E0A802B3014C0962934060EBB022E3A
-:101E400031022B34147E8004243502DE407AC10E28
-:101E5000C8ABDBD0DA302C0A00580AE52E31020E6E
-:101E60000F4CC8FEC020D10F6895F8283102080831
-:101E70004C658FEF1AEAD01CEACE2BA29EC09A7B4B
-:101E80009B462BC22668B0048D307BD93B29A29D8E
-:101E9000C0E3CB9394901BEAE02D31049B9608DDC0
-:101EA000110EDD029D979D9112EADDC0E524C4A2CA
-:101EB0002E34062F310228A29D02FF02288C3028E2
-:101EC000A69D2F3502C020D10FDA30C0B65813B30B
-:101ED000C020D10F6C1006292006689805289CF9AF
-:101EE00065825D29210209094C659210CD51DB30D4
-:101EF000DA20044C02581317C051D3A0C7AF2A36BA
-:101F00000AC0E019EAAD1DEAB31FEAAC8A3A16EA44
-:101F1000A9B1AC64C13528629E6F88026001F129C5
-:101F2000DC332992266890078B2009BB0C65B1E051
-:101F300027629DC08E6471D82B200C0CBC11A6CCDE
-:101F400029C2867983026001D219EA9B09B90A295C
-:101F500092A3971068900828220009880C6581BB1D
-:101F600027C2856471B5292006299CF96491EC2C5F
-:101F700020668931B1CC0C0C472C24666EC60260F9
-:101F800001A109F85065819B883689F4088C14AC4E
-:101F9000991CEA8B0C99022C2104997019EAA1086A
-:101FA00008479971892A09881008990218EA9E0839
-:101FB000990299722830132930120488100699105A
-:101FC00008990228302C9A740C881008C8020988D5
-:101FD00002987389379975883898768A39C0819ABA
-:101FE000771AEA918935987B99780989140A9902B8
-:101FF000997A8A30893277A73618EA808F33987CAD
-:10200000C084987D882B2E76112976122F7613198D
-:10201000EA7A0A9F4006FF1104CA110988020FAA32
-:1020200002987EC1F90FAA022A7610C0AA600001A8
-:10203000C0A6ADBF0CBC11A6CC29C2852EF4CF0919
-:10204000A90B29C685655107C020D10F2B200C0C88
-:10205000BC1106CC0828C28609B90A6F8902600142
-:102060002E2992A36890082A220009AA0C65A11FB4
-:102070002AC28564A11928203D08284064808C84E8
-:102080003504841464408485F574537F8436048455
-:1020900014644077745374293013C08C79886CC0F1
-:1020A000902924670908476580ED882089F48435E4
-:1020B0001FEA55048414A4940F440294A014EA5017
-:1020C00008881104880298A1843698A3048414A473
-:1020D000990F990299A219EA4CADB428C2852E44F1
-:1020E000CF288C1028C6852821022F20720988024B
-:1020F000B2FF2F2472282502C020D10F00CC57DA5E
-:1021000020DB30DC40581293C020D10FC09163FF18
-:102110008FDA20C0B658132163FFE100DA2058138C
-:102120001F63FFD88A102B21045811B41DEA2A1FFF
-:10213000EA232B200CC0E02E24668A3A63FE480076
-:1021400000DA20DB30DC40DD505813A6D2A0D10FDE
-:102150002A2C74DB40580B8ED2A0D10F292138C015
-:102160008879830263FE202A12002C20662B21042A
-:102170002CCC010C0C472C24665811A01DEA161F0C
-:10218000EA0F2B200CC0E02E24668A3A63FDF8008B
-:10219000DA2058130263FF64DA205BFF1CD2A0D15F
-:1021A0000F0000006C10089515C061C1B0D9402A1D
-:1021B000203DC0400BAA010A64382A2006291606D1
-:1021C00068A8052CACF965C33B1DE9FC6440052FEC
-:1021D000120564F29C2621021EE9F806064C65628F
-:1021E000E315E9F46440D98A352930039A140A9931
-:1021F0000C6490CC2C200C8B149C110CCC11A5CC15
-:102200009C122CC286B4BB7CB3026002D38F110E29
-:10221000FE0A2EE2A368E0098620D30F0E660C6545
-:1022200062BE88122882856482B6891464905EDA60
-:1022300080D9308C201EE9F21FE9F31DE9E08B14F0
-:102240008DD4D4B07FB718B88A293C10853608C61B
-:10225000110E66029681058514A5D50F550295804D
-:102260000418146D8927889608CB110888140EBBB2
-:1022700002A8D8299C200F88029BA198A088929B35
-:10228000A3088814A8D80F880298A22AAC1019E9CC
-:10229000DEC0C08F141EE9CF86128D11286285AE74
-:1022A000DD08FF0B2CD4CF2821022F66858B352A21
-:1022B0002072098802ABAA2825022A2472C020D1E4
-:1022C0000F29529E18E9BB6F9802600208288226E7
-:1022D00068800829220008990C6591F92A529DC14D
-:1022E000CA9A1364A1EF2B200C2620060CB811A566
-:1022F000882D82860EBE0A7DC3026002022EE2A3F2
-:1023000068E0082F22000EFF0C65F1F3288285DEBD
-:10231000806481FF9810266CF96461FF2C20668828
-:1023200031B1CC0C0C472C24666EC6026001BC088F
-:10233000FD5065D1B617E9BD19E9A21AE9A92C210A
-:10234000048B2D2830102F211D0C88100BFB090C3D
-:1023500088020A880209BB026441528910C04D9B61
-:1023600090979198928D35D9E064D06CD730DBD0BE
-:10237000D8307FD713273C10BCE92632168C39960B
-:10238000E69CE78A37B4389AE80B13146430492A7C
-:10239000821686799A9696978C778A7D9C982B825E
-:1023A000172C7C209A9A2A9C189B99867BB03BB864
-:1023B000896DB9218BC996A52692162AAC18B899B1
-:1023C0009BA196A08BC786CD9BA22B921596A49B12
-:1023D000A386CB2CCC2026A605C0346BD4200D3B85
-:1023E0000C0DD8090E880A7FB705C0909988BC8863
-:1023F000C0900B1A126DAA069988998B288C18C068
-:10240000D01BE98C1CE98B16E981B1FF2A211C2322
-:10241000E6130F0F4F26E6122F251D7FA906C0F0E9
-:10242000C08028251D05F6111AE97A8F202BE615A4
-:102430002CE6162DE61726E6180AFA022AE61429D3
-:102440002006299CF96490FF29200C8D15C0801A64
-:10245000E9610C9C11AA99A5CCDA202BC28528949D
-:10246000CF0B4B0B2BC685C0B08C1658118AD2A04F
-:10247000D10F8A356FA548D8308BD56DA90C8A86C7
-:102480000A8A14CBA97AB337288C10C08028246715
-:10249000080B4765B112DA20DB302C12065811AD5B
-:1024A000D3A0C0C1C0D02DA4039C1563FD268636E1
-:1024B00064610C8910C04D9B909791989263FEA423
-:1024C000C08163FFC78A15CCA7DA20DB308C165891
-:1024D00011A1C020D10FDA20C0B658123063FFE43A
-:1024E00000DA208B1158122D63FFD9009E178A1332
-:1024F0002B21045810C28E17C0B02B246663FE3403
-:10250000C08063FE09DA20DB308C16DD505812B52E
-:10251000D2A0D10FDA2058122163FFA82D2138C094
-:10252000C87DC30263FE0D8A132B21042C206698FC
-:1025300017B1CC0C0C472C24665810B08E17C0D0A5
-:102540002D246663FDEE0000262138B06606064F96
-:10255000262538656EF128206A7F870508294164A1
-:1025600090A5C0D01BE92619E93426200723E61BD5
-:10257000B16609FA022BE61A28200A2DE61D2AE682
-:102580001E09880228E61C882606064728E6202B16
-:10259000220826E53E2BE6212D24072C20062A20A2
-:1025A0006468C347B44463FE9EDB30DA208D15C0F7
-:1025B000CE2E0A802C24688C165810F1D2A0D10F90
-:1025C0008E102A321616E8FD0A2A1486662BE612A9
-:1025D00097E127E61328E614AA6609660296E02E1C
-:1025E000EC4869ED50C14663FD7A000064AFB41950
-:1025F000E8F328201689920A880C00910400881AB2
-:10260000A8B8982963FF9C002B21046EB81E2C20CB
-:1026100066B8CC0C0C472C2466C9C09E178A135888
-:1026200010778E17C0348F20C0D02D2466C0682646
-:10263000240663FF2C008D35C08064D04AD9E0DCCD
-:1026400030DBE0DF301AE8FDB188B4FF17E8FD8623
-:10265000C9249DFF8DC82CCC102D46300767012D55
-:1026600046320A66011DE8F7264631AD6D2D463328
-:1026700026F21597B796B684C3BCBB94B58D3529A1
-:102680009C107D83C22F211DC14663FD4B000000BD
-:102690006C1006292006289CF86582BF2921022B90
-:1026A000200C09094C6590E116E8C30CBA11A6AAE2
-:1026B0002DA2862C0A127DC30260028C19E8BF0984
-:1026C000B90A2992A36890078C2009CC0C65C278BE
-:1026D00029A2856492722D629E1AE8B56FD80260B5
-:1026E000026E2AA22629160168A0082B22000ABB26
-:1026F0000C65B25C29629DC18C6492542A21200A27
-:10270000806099102C203CC7EF000F3E010B3EB1BA
-:10271000BD0FDB390BBB098F260DBD112DDC1C0D48
-:102720000D410EDD038E27B1DD0D0D410FEE0C0DB9
-:10273000BB0B2BBC1C0BB7027EC71C2C21257BCBF3
-:10274000162D1AFC0CBA0C0DA16000093E01073EC3
-:10275000B1780987390B770A77EB0260020A2C21DE
-:1027600023282121B1CC0C0C4F2C25237C8B29B0A4
-:10277000CD2D2523C855DA20DB3058106F292102D2
-:10278000CC96C0E80E9E022E2502CC57DA20DB3014
-:10279000DC405810F0C020D10F2C20668931B1CC1C
-:1027A0000C0C472C24666EC6026001D309FD5065EF
-:1027B000D1CD2F0A012E301129221464E0112822D4
-:1027C0001B090C4400C10400FA1A0A880228261BBF
-:1027D0002E3010C0A0C0B0941295131CE878883039
-:1027E0002CC022088D14778704C0F10CFA38C04140
-:1027F000C0F225203CC0840858010F5F010F4B3800
-:1028000005354007BB10C0F0084F3808FF100FBB5C
-:102810000228ECFEC0F0084F38842B0BA8100AFFEA
-:10282000102A21200F88020B880208440218E8862B
-:102830008F110844022821250A2A14082814048824
-:10284000110A88022A210494F08B2004E41008BBAA
-:102850001104BB02C04A04BB029BF1842A08AB11DD
-:102860000BEB0294F40A54110B44020555100D1B96
-:102870004094F707BB100B5502085502C08195F62E
-:102880008433C05094F3B1948B3295F898F99BF24D
-:10289000C080C1BC24261499FA9BF598FB85389515
-:1028A000FC843A94FD8B3B9BFE883998FF85352547
-:1028B000F6108436851324F6118B3784122BF6120A
-:1028C000C0B064C07E89307797438D3288332E3014
-:1028D000108F111CE84A0999400699112CF614C072
-:1028E000C42CF6158C2B2DF61A28F61B2BF6190482
-:1028F000A81109880208EE0219E840C18008EE021A
-:1029000009C90229F6162EF618C09E600001C09A69
-:102910002F200C18E8300CFE11A8FFA6EE2DE28542
-:102920002BF4CF0D9D0B2DE685C87F8A268929A71C
-:10293000AA9A260A990C090948292525655050C0EC
-:1029400020D10F00C09A63FFC6DA2058111463FE2D
-:1029500038DA20C0B658111163FE2E0068973C2B60
-:102960009CFD64BE24C020D10FDA20DB705810CD4E
-:10297000C0C0C0D10ADA390ADC3865CDE063FE098F
-:102980008A102B2104580F9DC0B02B246663FE21B2
-:10299000DB402A2C7458097ED2A0D10FDA20580FC0
-:1029A000A263FCF76C1004C020D10F006C10042946
-:1029B0000A801EE8261FE8261CE7FF0C2B11ACBB83
-:1029C0002C2CFC2DB2850FCC029ED19CD0C051C0C6
-:1029D0007013E82214E82118E81F2AB285A82804F9
-:1029E000240A234691A986B8AA2AB685A9882784ED
-:1029F0009F25649FD10F00006C100AD6302830103C
-:102A0000292006288CF964829B68980B2A9CF9651A
-:102A1000A1B2022A02580F8489371BE7E8C89164E3
-:102A2000520E2A21020A0C4C65C2588D3019E7E17A
-:102A300074D7052E212365E29E2F929E1AE7DD6F43
-:102A4000F8026002532AA22668A0082C22000ACCB1
-:102A50000C65C2442A929D64A23E9A151FE7D78D49
-:102A600067C1E6C8DD2B620618E7D564B00528808B
-:102A7000217B8B432B200C18E7CF0CBC11A8CC2951
-:102A8000C28679EB460FBE0A2EE2A368E0052F222C
-:102A9000007EF9372CC2859C1864C2332B212F8706
-:102AA000660B7B360B790C6F9D266ED2462C203D33
-:102AB0007BC740CE5560001E2A200CC1B28C205826
-:102AC00010F79A1864A2458D6763FFCFC0C063FFFB
-:102AD000C5D7B063FFD300C0E06000022E60030ED4
-:102AE000DB0C6EB20EDC700CEA11AA6A2AAC20581C
-:102AF0000199D7A0DA20DB70C1C82D212058109190
-:102B00008C268B279A160CBB0C7AB3348F188963EA
-:102B100099F3886298F28E659EF82D60108A189D50
-:102B20001768D729C0D09DA92C22182B22139CAB43
-:102B30009BAA97A58E667E7302600097CF586000AF
-:102B40001FDA208B1658105765A13863FFBDC0816E
-:102B5000C0908F18C0A29AF999FB98FA97F563FF75
-:102B6000D2DB30DA20DC40580FFBC051D6A0C0C009
-:102B70002BA0102CA4039B172C1208022A02066B10
-:102B800002DF702D60038E179D149E100CDD11C0A6
-:102B9000E0AD6D2DDC205801188C148B16ACAC2CDC
-:102BA00064038A268929ABAA0A990C9A26886609A1
-:102BB000094829252507880C98662F2218A7FF2F7A
-:102BC000261863FE96DA20DB30DC40DD5058110514
-:102BD000D2A0D10FC0302C20668961B1CC0C0C473B
-:102BE0002C24666EC6026000D2C03009FD5065D04C
-:102BF000CA8E6764E069647066DB608C18DF70DA27
-:102C0000202D60038E170CDD119E10AD6D2DDC2084
-:102C10001EE78D5800F9232618DA208B16DC402F8A
-:102C20002213DD50B1FF2F2613580F9AD2A0D10FD7
-:102C30000028203D084840658DE76F953EDA308DCD
-:102C4000B56D990C8CA80C8C14CACF7CD32D2AACF2
-:102C500010C090292467090D4764DDC5600092000B
-:102C60002C1208066B022D6C20077F028E17DA20CB
-:102C70009E101EE77458007D63FF9A00C09163FFA9
-:102C8000D1000000655081DA20DB60DC40580FB1D4
-:102C9000C020C0F02FA403D10FDA20C0B658103FD7
-:102CA00063FFE000006F950263FD6CDA20DB30DC2F
-:102CB00040DD50C4E0580F32D2A0D10F8A152B212D
-:102CC00004580ECE232466286010981763FF210055
-:102CD000DA2058103263FFABC858DB30DA20580FC7
-:102CE000162A210265AF9CC09409A9022925026316
-:102CF000FF91DB30DC40DD50C0A32E0A802A24681F
-:102D0000DA20580F1FD2A0D10FC020D10FDA202B0C
-:102D1000200C58104763FF6B6C1004282006C0621B
-:102D2000288CF8658125C050C7DF2B221BC0E12A03
-:102D3000206B29212300A104B099292523B1AA00E1
-:102D4000EC1A0BC4010A0A442A246B04E4390DCCA2
-:102D5000030CBB012B261B64406929200C1BE715C3
-:102D60000C9A110BAA082FA2861BE7136FF90260B9
-:102D700000B60B9B0A2BB2A368B0082C22000BCC28
-:102D80000C65C0A42BA2851DE73664B09B8C2B2458
-:102D900021040DCC029CB08820C0C50888110C8885
-:102DA0000298B1882A08441198B48F3494B79FB51B
-:102DB000C0401EE7082DA2850E9E0825E4CF2DDC1D
-:102DC000282DA68529210209094C68941A689820A3
-:102DD000C9402A210265A00B2A221E2B221D7AB18E
-:102DE0000265A079C020D10F2C212365CFDE6000C1
-:102DF000082E21212D21237EDBD52B221E2F221DE3
-:102E00002525027BF901C0B064BFC413E6E92CB0EC
-:102E10000728B000DA2003880A28824CC0D10B8032
-:102E200000DBA065AFE763FFA62A2C74C0B02C0AB4
-:102E300002580E081CE70C9CA08B2008BB1106BB97
-:102E4000029BA1893499A263FF790000262468DAE5
-:102E500020DB30DC40DD50581063D2A0D10FDA20E7
-:102E60002B200C580FCEC020D10F00006C1006078D
-:102E70003D14C080DC30DB40DA20C047C02123BCD9
-:102E800030032838080842774001B1DD64815A1EBA
-:102E9000E6C519E6C629E67ED30F6DDA050050882F
-:102EA00000308CC0E0C02025A03C14E6C4B6D38F0F
-:102EB000C0C0D00F87142440220F8940941077F7A8
-:102EC00004C081048238C0F10B2810C044C0220421
-:102ED000540104FD3802520102FE3808DD10821C44
-:102EE00007EE100E6E020EDD02242CFEC0E004FE82
-:102EF000380AEE100E88020D88028DAB1EE6B4086B
-:102F0000D8020E880298B0C0E80428100E5E018432
-:102F1000A025A125084411084402052514045511D3
-:102F2000043402C0810E8E3994B18FAA84109FB4EC
-:102F300075660C26A11FC0F2062614600009000069
-:102F400026A120C0F20626140565020F7701078727
-:102F50003905E61007781008660206550295B62571
-:102F6000A1040AE61108581108280208660296B75B
-:102F7000C060644056649053067E11C0F489C288D4
-:102F8000C30B340B96459847994618E69B9F41041E
-:102F900059110E99021FE699020E4708D80298426D
-:102FA0000E99029F40C1E00E990299442FA00CB4E3
-:102FB000380CF91114E6881EE67FA4FFAE992E9214
-:102FC0008526F4CF0E880B289685D10F2BA00C1FD9
-:102FD000E6791CE6800CBE11ACBBAFEE2DE2852677
-:102FE000B4CF0D3D0B2DE685D10FC0800528387874
-:102FF000480263FEA263FE966C1006C0C06570F1C5
-:103000008830C030088714778712C0B0C0A619E690
-:103010006B299022C030CC97C031600003C0B0C093
-:10302000A6C0E0C091C0D4C08225203C0B3F1097C1
-:1030300012831CC0700858010D5D01089738C080CC
-:103040000B9838077710048810086802087702C0C8
-:10305000800D98382D3CFE0888100D9E388D2B0A67
-:10306000EE1008EE0207EE020CB8100FDD02053B71
-:10307000400EDD029D408920043D100899110D99F4
-:10308000022D210409A90208DD119941872A05B9F9
-:10309000100D3D020ABB110DBB02087702974428B0
-:1030A00021258712082814048811071E4007EE10F6
-:1030B0000E990275660926211F0626146000060077
-:1030C0002621200626140868029B47098802984694
-:1030D00029200CD2C0C0800C9E111BE63E1FE63595
-:1030E000AB99AFEE2DE2852894CF0DAD0B2DE68583
-:1030F000D10FDD40C0A6C0B08E51CAE0B2AAB1BBAC
-:103100002DDC108F500E7836981008770C9FD898C9
-:10311000D989538F52991199DB9FDA7E8309B1CCFB
-:10312000255C10C97763FFCF88108D1108E70C97D5
-:1031300051AD8DD7F078DB01B1F79D5397528830B0
-:10314000C030088714088840648ED565BEC963FE08
-:10315000BC0000006C1004D720B03A8820C0308238
-:1031600021CAA0742B1E2972046D080FC980C99151
-:103170008575B133A2527A3B0B742B0863FFE900CB
-:10318000649FECD10FD240D10F0000006C100AD622
-:10319000302E3027D950DA4015E6092430269A150A
-:1031A00029160464E0026493732920062A9CF865BA
-:1031B000A3CE2A2102270A040A0B4C65B3978C3050
-:1031C00074C7052D212365D4A0C0A62B0A032C2289
-:1031D00000580F0B64A3B917E5F78E389A1664E30D
-:1031E000BA2F6027285021C9F37E8311C2B08C20EA
-:1031F0002A200C580F2AD7A0CDA16004A200C2B08B
-:103200008C202A200C580EFED7A064A4862F212ED5
-:103210008B680FBF360FB90C6F9D54296027D5B04E
-:103220006E920528203D7B8F4CDA20DB50C1C42DE7
-:10323000211F580EC48B269A189A1989272AAC3850
-:103240000B990C7A93538963C08099738F62987835
-:103250009F728E659E798D679D7B8C6695759C7A35
-:103260008E687E53026000B18B1465B050600038E8
-:10327000DBF063FFA5008A14C9A92E60030E9B0C26
-:103280006EB2A5DC500CEA11AA6A2AAC285BFFB129
-:10329000D5A063FF93C0E063FFE2DA208B18580EDD
-:1032A0008165A2B163FF9E0000DA20DB308C1558E7
-:1032B0000E29D6A0C0C0C0D12D16042CA403DC70EA
-:1032C000DA20DB60DF502D6003C0E09E109D171EEA
-:1032D000E5D20CDD110D6D082DDC285BFF478E66F5
-:1032E0008F678817AF5FA8A828640375FB01B1EE4C
-:1032F0008A189E669F6789268829AA9909880C9949
-:10330000268E6808084805EE0C28252515E5AC9E94
-:103310006865EECC63FEE6000000C9432F21232B35
-:1033200021212FFC010F0F4F2F25237FBB026003AC
-:10333000142C20668961B1CC0C0C472C24666EC617
-:103340000260022809FD5065D22264E1B62E602792
-:1033500064E1B0DC70DF50DA20DB601EE5C32D6075
-:1033600003C08098100CDD11AD6D2DDC285BFF22B1
-:10337000644181C0442B0A008C202A200C580EA0E6
-:103380000AA70265A00FC0B02C22002A200C580EFC
-:103390009CD7A064AFEFDA20C1BCC1C82D21208F1B
-:1033A000188E268929AFEE9E260E990C0909482908
-:1033B0002525580E64C090C050C0C288609A191E5E
-:1033C000E57FC0A12EE022088F14778704C0810E0C
-:1033D0008938C0800B93102D203C2921200CDC0162
-:1033E00004DB010929140BA8380CA5380D3D401C3D
-:1033F000E5968B2B088810075510085502053302F7
-:103400002821250F154003BB020CBB0207551005F0
-:10341000D3100828140ADD11048811098802053325
-:10342000022921040833029B70C0808A201BE58F8B
-:1034300008AA110BAA029A71C0A1852A93769574E5
-:1034400008931103DD020ADD029D778C63C1DC9CC9
-:10345000738B6298789A799B72232214C0C0B1351D
-:103460002526149C7B9D75937A2B621A9B7C2A627D
-:103470001C9A7D28621D987E25621B957F2362170A
-:103480002376102D62182D76112C62192C76126479
-:10349000E0B98E6077E73DC0FE13E5571DE558C1E2
-:1034A000818A628B630495110E9C4006CC110C55E9
-:1034B00002247615085502C0802D76148D2B2B76AC
-:1034C0001B2A761A28761925761803DD022D761622
-:1034D0006000030000C0FA2E200C19E53E18E53507
-:1034E000A9E90CEE11A8EEC0802DE2852894CF0D3D
-:1034F000FD0B2DE685DA208B198C158D14580D6582
-:10350000D2A0D10FDC70DF50DB602D6C28C0A01E74
-:10351000E5569A10DA205BFE5563FE53002B203DE2
-:103520000B4B4065BC826FE527DA308F556DE90C97
-:103530008EAA0E8E14C9E87EF3162AAC10C090290C
-:103540002467090F4764FC6060015F00C0FA63FFF5
-:1035500085C09163FFE88814658168DA20DB608CA0
-:1035600015580D7CC020C09029A403D10F8A162BBA
-:103570002104580CA2C0A02A24668E6863FDCA00EC
-:10358000002B9CF965B0FDDA20580CA763FC2200E3
-:1035900000DA20C0B6580E0163FFBA002B200C0CD5
-:1035A000BE11A7EE2DE286C1C27DC30260011819CB
-:1035B000E50209B90A2992A36890082A220009AAFB
-:1035C0000C65A10326E2856460FD2C20668931B17B
-:1035D000CC0C0C472C24666FC60270960C8A162BF6
-:1035E0002104580C86C0D02D24668E3077E74D1C00
-:1035F000E5021BE5028F328833C0A42D21040E9909
-:103600004006991104DD1109DD029A61C19009DDBE
-:10361000029B60C0908B2B9D649F66986799650C98
-:10362000BB029B6228200C1AE4EBAA8A0C8811A723
-:10363000882F828529A4CF2FFC202F86858A1465A8
-:10364000A0A6C020D10FB0FC8B142C2523C8B70234
-:103650002A02066B02580CB82A210265AEF7C0D8C0
-:103660000DAD022D250263FEEC008E14C8E8DA20B1
-:10367000DB30580CB12A210265AEDA07AF022F25E4
-:103680000263FED100DA20DB308C158D14580E5504
-:10369000D2A0D10FDA202B200C580DC063FEB6004B
-:1036A000DA202B200C580DE263FEAADA20DB308CE6
-:1036B000152D12042E0A80280A00282468580CB000
-:1036C00063FAE500C020D10FDA20580DB48914CD7B
-:1036D00092DA20DB308C15580D1FDBA0C020C0A073
-:1036E0002AB403D10FC020D10F2A2C748B15580691
-:1036F00028D2A0D10F0000006C100C2821029410D9
-:1037000008084C6583621FE4AB29F29E6F98026043
-:1037100003661DE4A729D2266890082A220009AA78
-:103720000C65A3542CF29D64C34E2B200C0CB611D7
-:10373000AF66286286C1EC78E30260034619E49E16
-:1037400009B90A2992A36890078A2009AA0C65A3DF
-:103750003224628564432CC0E12A3109C0702724D9
-:103760006689359A11992A88369912982B89379843
-:1037700013992C883899140858149815982D89395C
-:103780002A25042E251D29251C283028C0922824EE
-:103790003C2A302908084798160989012A243D2A1D
-:1037A000311599170A094109A90C299CEC29251FF3
-:1037B0007E87192D2A000DA06000083E010A3EB147
-:1037C000AD08DA390EAA110A990C29251F2A211FE2
-:1037D00018E4A80A8160C1D0941A951B01083E0024
-:1037E000053EB184054839843C259CFC0D8836296A
-:1037F000201408AA1C8D3D2726182E26132E2614C9
-:103800002E261527261B2E246B27246727246808BD
-:10381000581C0909432924142932112A252E282548
-:103820002F27252427252527252C27252325252037
-:103830002425212D2522841A2D211C851B6FD202BF
-:10384000600209C0A099186D080AB1AA00A104007D
-:10385000E91A7D9B0263FFEE8918C080C0E1C07049
-:10386000C0D29B1D951B961C9C1E16E4722C203DFD
-:1038700015E4820C0B400DCC010BE7381DE4640A03
-:1038800077100CE8380B8810C0C49C410877029D63
-:1038900040B0A80988118B209C499D48954B9643C0
-:1038A000087702861418E47315E45A08770205BBFA
-:1038B000029B4A9B4297468812871108DA149A4E57
-:1038C0000D88100D77110877021AE44E06D8140DF2
-:1038D0006610087702974FC78F984D984C98458788
-:1038E0001598440715140D55110A5502954715E40E
-:1038F000638A262D46102D46182D46202C46112C65
-:1039000046192C46212B46122B461A2846142846C7
-:10391000152B462288162546242546268B170A0C89
-:1039200048090D4885130EDD1105CC110839400BEF
-:10393000EB390299101EE4520DCC020D5511082DE1
-:10394000400655022E461316E41D0FDD11254616BE
-:10395000080840851B0188100DBB0286671DE449DD
-:103960000988020CBB0219E4191CE4472B46172DE9
-:10397000461BA7661BE446C0702C461C0988028CB7
-:103980001E28461E2B4623C0908B1D29461D294606
-:103990001F18E43F2946272846252931162E2006E0
-:1039A00029246A243117962D242538861CCCE1273A
-:1039B0002407C0D7090E4064E0829A29092841648F
-:1039C000809164409B2D2406C098094936280AA09E
-:1039D00024628501C404A84428210424668508883B
-:1039E000118E3F8A3E2D32100EA41800C4040EAE74
-:1039F0001800EE110ACA530EDD02C0E30E880298C9
-:103A0000C11EE42409084E9EC08E2094C398C59D13
-:103A1000C418E3F01DE42105EE110EAA020DAA025E
-:103A2000A8B82784CF9AC21EE3E224F29D27E4A21D
-:103A3000244C1824F69D655052C020D10F2D240629
-:103A4000C0A0C09809493604A93863FF7FC0A063AD
-:103A5000FE070000654F6DC098C0A82A240663FFCA
-:103A60006B2D2406C09063FF63CC57DA20DB308CCB
-:103A700010580C38C020D10F00DA20C0B6580CC73F
-:103A800063FFE500DA20580CC563FFDC2A2C748B39
-:103A90001058053FD2A0D10F6C10062820068A339B
-:103AA0006F8202600161C05013E3C229210216E354
-:103AB000C1699204252502D9502C20159A2814E3B7
-:103AC000BF8F2627200B0AFE0C0477092B711C647C
-:103AD000E1398E428D436FBC0260016F00E104B09A
-:103AE000C800881A08A80808D80298272B2006685A
-:103AF000B32ECE972B221E2C221D0111027BC90151
-:103B0000C0B064B0172CB00728B000DA2003880AD0
-:103B100028824CC0D10B8000DBA065AFE7C020D16C
-:103B20000F2D206464DFCA8B29C0F10BAB0C66BF7C
-:103B3000C02B200C0CBC11A6CC28C2862E0A0878FB
-:103B4000EB611EE39D0EBE0A2EE2A368E00528226B
-:103B5000007E894F29C2851EE3A96490461FE3B603
-:103B60009E90C084989128200A95930F880298927D
-:103B70008E200FEE029E942F200788262F950A98FC
-:103B8000969A972E200625240768E3432921022AC6
-:103B9000C2851DE3902AAC20ADBD25D4CF2AC685B1
-:103BA00063FF4E002E2065CBEDC082282465C9F648
-:103BB00005E4310002002A62821BE3982941020BCE
-:103BC000AA022A668209E43129210263FF23000048
-:103BD00064DFB88F422E201600F1040DEE0C00EECB
-:103BE0001AAEAE9E2963FFA38A202B3221B1AA9A76
-:103BF000B0293221283223B4992936217989A92B79
-:103C000032222B362163FFA0C020D10F9F2725240D
-:103C100015ACB828751C2B2006C0C12EBCFE64E074
-:103C2000AB68B7772DBCFD65DEC72D2064C0F0649E
-:103C3000D0868E290EAE0C66E089C0F128205A2865
-:103C40008CFE08CF3865FEE863FF580000E004935F
-:103C500010C0810AF30C038339C78F08D80308A862
-:103C60000108F80C080819A83303C80CA8B828756F
-:103C70001C030B472B24158310CBB700E104B0BC09
-:103C800000CC1AACAC0CDC029C27659E5EC0B20B6B
-:103C9000990209094F29250263FE50002D206A0D63
-:103CA0002D4165DF7EDA20C0B0580C8F64AF18C09C
-:103CB000F163FEEF9F2763FFD02E221F65EE326374
-:103CC000FF79000028221F658E2763FF6E252406DA
-:103CD00029210263FE1B00006C10066571332B4C1A
-:103CE00018C0C7293C18C0A1C08009A838080842DC
-:103CF0006481101CE32C1AE32D2AC67E2A5CFDD3B6
-:103D00000F6DAA0500B08800908C8940C0A009887A
-:103D1000471FE355080B47094C50090D5304DD10AC
-:103D2000B4CC04CC100D5D029D310CBB029B3088DD
-:103D3000438E2098350FEE029E328D26D850A6DD98
-:103D40009D268E40C0900E5E5064E0971CE33B1EA3
-:103D5000E32B038B0BC0F49FB19EB02D200A99B3C7
-:103D60000CDD029DB28F200CFF029FB48E262D2009
-:103D7000079EB68C282DB50A9CB72924072F20064C
-:103D80002B206469F339CBB61DE30D2320168DD2A9
-:103D90000B330C00D10400331AB48DA3C393292232
-:103DA000200C13E30C1FE3030C2E11AFEEA322290A
-:103DB00024CF2FE285D2A00FDD0B2DE685D10F0099
-:103DC0002E200CB48C0CEB111FE3031DE2FAAFEEB6
-:103DD000ADBB22B28529E4CF02C20B22B685D2A0A8
-:103DE000D10F00002E200C1CE2F31FE2FA0CEB11A5
-:103DF000AFEEACBB22B28529E4CF02820B22B6859E
-:103E0000D2A0D10FC0D00BAD387DC80263FEEC63E9
-:103E1000FEE08E40272C747BEE12DA70C0B32C3C8F
-:103E200018DD50580A868940C08063FEE3066E02A2
-:103E3000022A02DB30DC40DD505800049A10DB50CF
-:103E4000DA70580453881063FEF700006C10069275
-:103E5000121EE2E48C40AE2D0C8C472E3C1804CA96
-:103E60000BD9A07DA30229ADF875C302600084C000
-:103E7000B0C023C0A09D106D0844B89F0EB80A8D35
-:103E8000900EB70BB8770D6D36ADAA9D800D660C00
-:103E9000D8F000808800708C879068B124B22277B7
-:103EA000D3278891C0D0CB879890279C100070882A
-:103EB00000F08C9D91CB6FC08108BB0375CB36633E
-:103EC000FFB4B1222EEC1863FFD485920D770C86D7
-:103ED000939790A6D67D6B01B1559693959260000D
-:103EE00016B3CC2D9C188810D9D078D3C729DDF80B
-:103EF00063FFC100C0238A421BE2E900CD322D449A
-:103F0000029B3092318942854379A1051EE2E50E7C
-:103F1000550187121BE2D5897095350B99029932AC
-:103F200088420A880C98428676A6A696768F44AF79
-:103F3000AF9F44D10F0000006C10089311D6308859
-:103F400030C0910863510808470598389812282115
-:103F500002293CFD08084C6581656591628A630A07
-:103F60002B5065B18B0A6F142E0AFF7CA60A2C20F9
-:103F70005ACCC42D0A022D245A7FE0026002158912
-:103F80002888261FE2C809880C65820F2E200B0F97
-:103F9000EE0B2DE0FE2EE0FF08DD110EDD021EE22D
-:103FA000C2AEDD1EE2C21CE2C20EDD010DCC37C185
-:103FB00080084837B88DB488981089601AE2807BF1
-:103FC00096218B622AA0219C147BA3179D132A2083
-:103FD0000C8B108C20580BB18C148D13DBA0CEAC45
-:103FE0006001C4002E200C1BE2730CEA110BAA081E
-:103FF0002BA2861FE2717BDB3B0FEF0A2FF2A36837
-:10400000F0052822007F892C2BA28564B0AA876244
-:104010008826DE700C7936097A0C6FAD1C8F279BD1
-:104020001508FF0C77F3197E7B729D139C149B156A
-:10403000CF56600025C0B063FFD0D79063FFDD008E
-:10404000009D139C14DA20DB70580B168B158C1412
-:104050008D1365A06A8E6263FFCC00DA208B11DCC1
-:1040600040580ABCD6A08B15C051DE70DA20DC6047
-:10407000DD405BFF768D138C14D9A02E200C1BE243
-:104080004D1FE2540CEA11AFEFC0E0ABAA2BA285A2
-:104090002EF4CF0B990B29A68563FF1D00DA20DCD7
-:1040A00060DD40DE708912282007DF50A9882824AF
-:1040B000075BFF09D2A0D10F00DBE0DA20580B37F5
-:1040C0006550EF2A20140A3A4065A0EBDB60DC4023
-:1040D000DD30022A025809A7D6A064A0D584A183A6
-:1040E000A00404470305479512036351C05163FEC2
-:1040F0005C2C2006D30F28CCFD6480A568C704C0C3
-:10410000932924062C2006C0B18D641FE22C9D2724
-:104110009D289D298FF29D2600F10400BB1A00F016
-:1041200004B0BE0EDD01C0F0ADBB8D652F24070DC0
-:104130000E5E01EE11AEBB2E0AFEB0BB0B0B190ECC
-:10414000BB36C0E20B0B470EBB372B241618E224FC
-:104150000A09450D0B422B240B29240AB4BE2E2438
-:104160000C7D88572920162FCCFDB09D0A5C520D7E
-:10417000CC362C246465FDEC0C0C4764CDE618E2CB
-:104180000F8E2888820C9F0C00810400FF1AAFEE6E
-:104190009E2963FDCF1CE23E63FE13001CE23563E3
-:1041A000FE0C8D6563FFA500DA202B200C580B2038
-:1041B000645F0FC020D10F00C020D10FC09329240D
-:1041C00016C09363FFA000006C1004C06017E1F8F4
-:1041D0001DE1FBC3812931012A300829240A78A175
-:1041E00008C3B27BA172D260D10FC0C16550512605
-:1041F00025022AD0202F200B290AFB2B20142E2049
-:104200001526241509BB010DFF0928F11C2B2414C8
-:10421000A8EE2EF51C64A0A92B221E28221D011138
-:10422000027B8901DB6064B0172CB00728B000DA8C
-:104230002007880A28824CC0D10B8000DBA065AF24
-:10424000E7DB30DC40DD50DA205800DE29210209AE
-:104250000B4CCAB2D2A0D10F00CC5A2C30087BC173
-:10426000372ED02064E02D022A02033B02DC40DD21
-:10427000505800D4D2A0D10F2B2014B0BB2B241443
-:104280000B0F4164F0797CB7CAC0C10C9C022C258D
-:1042900002D2A0D10FC020D10F2E200669E2C12684
-:1042A00024062B221E2F221D29200B2820150D99B4
-:1042B000092A911C262415AA8828951C7BF149609F
-:1042C0000048B0BB2B24140B0A4164A0627CB702E7
-:1042D0002C25022B221E2C221DD30F7BC901C0B01E
-:1042E000C9B62CB00728B000DA2007880A28824C0B
-:1042F000C0D10B8000DBA065AFE7C020D10F00006C
-:10430000262406D2A0D10F0000DB601DE1AC64BF03
-:104310004F2CB00728B000DA2007880A28824CC04A
-:10432000D10B8000DBA065AFE71DE1A463FF310086
-:1043300026240663FF9C00006C1004282006260A31
-:10434000046F856364502A2920147D9724022A0271
-:10435000DB30DC40DD50580019292102090A4CC825
-:10436000A2C020D10FC0B10B9B022B2502C020D1CF
-:104370000F00022A02033B022C0A015800D1C9AAED
-:10438000DA20DB30DC405809F329A011D3A07E9756
-:10439000082C0AFD0C9C012CA411C0512D201406E0
-:1043A000DD022D241463FFA4DA20DB30DC40DD5075
-:1043B000C0E0580973D2A0D10F0000006C1006169F
-:1043C000E17D1CE17D655157C0E117E179282102AB
-:1043D0002D220008084C6580932B32000B6951296F
-:1043E0009CFD6590872A629E6EA84C2A722668A062
-:1043F000027AD9432A629DCBAD7CBE502B200C0C97
-:10440000BD11A6DD28D2862F4C0478FB160CBF0AFE
-:104410002FF2A368F0052822007F89072DD285D3CB
-:104420000F65D0742A210419E1A3D30F7A9B2EDAE9
-:104430002058086E600035002D21041BE19E7DBBD5
-:1044400024DA20C0B6580869CA546001030B2B5007
-:104450002B240BB4BB0B0B472B240C63FFA0DA20DF
-:10446000580A4E600006DA20C0B6580A4C6550E083
-:10447000DC40DB302D3200022A020D6D515808BDA0
-:104480001CE14ED3A064A0C8C05184A18EA0040436
-:10449000470E0E4763FF3500002B2104C08C893185
-:1044A000C070DF7009F950098F386EB8172C20667C
-:1044B000AECC0C0C472C24667CFB099D105808CF11
-:1044C0008D1027246694D11EE151B8DC9ED0655032
-:1044D00056C0D7B83AC0B1C0F00CBF380F0F42CBAE
-:1044E000F119E13018E13228967EB04BD30F6DBA46
-:1044F0000500A08800C08C2C200CC0201DE1360CCB
-:10450000CF11A6FF2EF285ADCC27C4CF0E4E0B2EB9
-:10451000F685D10FC0800AB83878D0CD63FFC100CE
-:104520008E300E0E4763FEA12A2C742B0A01044D17
-:10453000025808C22F200C12E1270CF911A699A2EB
-:10454000FF27F4CF289285D2A008480B289685D162
-:104550000FC020D10F0000006C1004C060CB55DBF1
-:1045600030DC40055D02022A025BFF94292102092A
-:10457000084CC882D2A0D10F2B2014B0BB2B24141E
-:104580000B0C41CBC57DB7EBC0C10C9C022C2502A6
-:10459000D2A0D10F0000022A02033B02066C02C027
-:1045A000D0C7F72E201428310126250228240A0F0F
-:1045B000EE012E241458010E63FFA300262406D218
-:1045C000A0D10F006C1006282102D62008084C65E7
-:1045D000809D2B200C12E0F70CB811A2882A82864D
-:1045E000B5497A930260009719E0F409B90A299253
-:1045F000A36890082A620009AA0C65A08228828517
-:104600001CE0FF6480799C80B887B14B9B819B1034
-:10461000655074C0A7D970280A01C0D0078D380D25
-:104620000D42CBDE1FE0E01EE0E12EF67ED830D357
-:104630000F6D4A0500808800908C2E3008C0A000C5
-:10464000EE322E740028600C19E0E30C8D11A2DD0F
-:10465000A988C0202CD2852284CFD2A00CBC0B2CE0
-:10466000D685D10FC0F0038F387FA0C063FFB400A0
-:10467000CC582A6C74DB30DC405807F6C020D10FD0
-:10468000DA605809C663FFE7DD402A6C74C0B0DC0D
-:104690007058086A2E30088B1000EE322E740028F5
-:1046A000600C19E0CC0C8D11A2DDA988C0202CD2A1
-:1046B000852284CFD2A00CBC0B2CD685D10F000054
-:1046C0006C1004292014282006B19929241468812B
-:1046D00024C0AF2C0A012B21022C24067BA004C08D
-:1046E000D02D2502022A02033B02044C02C0D058FE
-:1046F00000C0D2A0D10FC020D10F00006C1004293F
-:104700003101C2B429240A2A3011C28378A16C7BFA
-:10471000A1696450472C2006C0686FC562CA572D36
-:1047200020147CD722DA20DB30DC40DD505BFFA593
-:10473000292102090E4CC8E2C020D10FC0F10F9F01
-:10474000022F2502C020D10FDA20DB30C0C05BFF72
-:10475000DC28201406880228241463FFC7292015AA
-:104760001BE0972A200BC0C09C240BAA092BA11C7C
-:104770002C2415AB9929A51C63FF9900C020D10FEB
-:10478000DA20DB30DC40DD50C0E058087DD2A0D11B
-:104790000F0000006C1004CB5513E09225221F0D72
-:1047A000461106550CA32326221E25261F06440B60
-:1047B00024261E734B1DC852D240D10F280A80C038
-:1047C0004024261FA82828261E28261DD240D10FA7
-:1047D000C020D10F244DF824261E63FFD80000000E
-:1047E0006C1004D620282006C0706E85026000D4AC
-:1047F0001DE07919E07112E06F2A8CFC64A1302B66
-:104800006102B44C0B0B4C65B0A22B600C8A600C9F
-:10481000B8110288082E828609B90A7EC302600098
-:104820009A2992A368900509AA0C65A08E28828512
-:10483000648088B8891BE07594819B80655155C060
-:10484000B7B8382A0A01C0C009AC380C0C4264C0A1
-:10485000421FE0541EE0562EF67EB04AD30F6DAADA
-:104860000500808800908CC0A029600C0C9C11A2CF
-:10487000CC2BC285AD990B4B0B2BC6852860062728
-:1048800094CF6881222D6015D2A0C9D2C0E22E64D7
-:1048900006D10F00C0F008AF387FB0BD63FFB10094
-:1048A000276406D2A0D10F00D2A0D10F00CC57DAD6
-:1048B00060DB30DC405808A7C020D10FDA6058090F
-:1048C0003763FFE80028221E29221DD30F789901A3
-:1048D000C080C1D6C1C11BE043C122AB6B64804222
-:1048E00078913F2A80000CAE0C64E0BB02AF0C64F0
-:1048F000F0B52EACEC64E0AF0DAF0C64F0A92EACBB
-:10490000E864E0A32FACE764F09D2EACE664E0978A
-:104910002F800708F80BDA807B83022A8DF8D8A055
-:1049200065AFBC28612308D739D97060007B0000CF
-:104930002B600C0CB811A2882C82862A0A087CAB4A
-:104940007E09BA0A2AA2A368A0052C62007AC96F60
-:104950002A828564A0691FE029276504C0E3C0C4DA
-:104960002E64069CA11CE0549FA02E600A97A30C05
-:10497000EE029EA28F600CFF029FA42E60147AEFBD
-:104980004627A417ADBC2F828527C4CF2FFC202F2C
-:10499000868563FE692A6C74C0B1DC90DD405807DF
-:1049A000A71DE00C63FEC100D9A0DA60DB30C2D0E5
-:1049B000C1E0DC4009DE39DD505807F1D2A0D10F4B
-:1049C000DA605808F663FEE4290A0129A4170DBF2E
-:1049D000082E828527F4CF2EEC202E868564500B7E
-:1049E0002A6C74DB4058016AD2A0D10FC020D10FCD
-:1049F0006C10062B221E28221D93107B8901C0B04B
-:104A0000C0C9C03BC1F20406401DDFF6C0E2C0745D
-:104A10000747010E4E01AD2D9E11C0402E0A1464B1
-:104A2000B06E6D084428221D7B81652AB0007EA1EE
-:104A30003B7FA1477B51207CA14968A91768AA1434
-:104A400073A111C09F79A10CC18B78A107C1AE29B8
-:104A50000A1E29B4007CA12B2AB0070BAB0BDAB0DD
-:104A60007DB3022ABDF8DBA0CAA563FFB428B0104D
-:104A700089116987BB649FB863FFDC00647FB463FE
-:104A8000FFD50000646FD0C041C1AE2AB40063FFFF
-:104A9000C62B2102CEBE2A221D2B221E7AB12A8CC1
-:104AA000107CB1217AB901C0B0C9B913DFC1DA20D5
-:104AB00028B0002CB00703880A28824CC0D10B8094
-:104AC00000DBA065AFE7D240D10F8910659FD463AA
-:104AD000FFF300006C1008C0D0C8598C30292102A7
-:104AE0000C0C4760000C8E300E1E5065E19E292193
-:104AF00002C0C116DFB0090B4C65B0908A300A6E57
-:104B00005168E3026000852F629E1BDFA96EF85397
-:104B10002BB22668B0052E22007BE94727629DB79D
-:104B200048CB7F97102B200CB04E0CBF11A6FF294D
-:104B3000F2869E12798B4117DFA007B70A2772A36E
-:104B4000687004882077893029F285DF90D79065D6
-:104B500090652A210419DFD77A9B22DA205806A310
-:104B6000600029002C21041BDFD37CBB18DA20C095
-:104B7000B658069EC95860014CC09063FFCCDA203D
-:104B8000580886600006DA20C0B65808846551359A
-:104B9000DC40DB308D30DA200D6D515806F6C0D088
-:104BA000D3A064A120292102C05184A18CA00404B7
-:104BB000470C0C4763FF3E00C09C8831DBD008F8EF
-:104BC00050089B3828210498116E8823282066AC51
-:104BD0008C0C0C472C24667CBB159F139E148A10EA
-:104BE0008B115807068E148F13C0D02D24668A307F
-:104BF000C092C1C81BDF867FA6099BF099F12CF4F7
-:104C00000827FC106550A4B83ADF70C051C0800777
-:104C1000583808084264806718DF6319DF64298602
-:104C20007E6A420AD30F6DE90500A08800F08CC0AF
-:104C3000A08930B4E37F9628C0F207E90B2C9408D2
-:104C40009B909F912F200C12DF630CF811A68829EE
-:104C50008285A2FF2DF4CFD2A009330B238685D104
-:104C60000F22200C891218DF5B0C2B11A6BBA82287
-:104C70002D24CF2CB285D2A00C990B29B685D10F4B
-:104C8000C087C0900A593879809663FF8ADB30DA92
-:104C900020C0C1C0D05BFF56292102C0D02A9CFE93
-:104CA00065AE4D2D2502C09063FE45009E142A2C52
-:104CB00074C0B1DC70DD405806E18E14C0D01BDF3B
-:104CC00053C1C863FF6AC020D10F00006C100628D2
-:104CD000210216DF3808084C65821929629E6F98F8
-:104CE0000260022019DF332992266890078A200982
-:104CF000AA0C65A20F27629DC0CC6472072B210409
-:104D00008E31C0A0DDA00EFE500ECD386EB8102C36
-:104D10002066B1CC0C0C472C24667CDB026001EFD2
-:104D2000C0C12930081BDF2564909C2F0AFFC0D327
-:104D3000B09E64E1026892136450882A2C74044B7C
-:104D4000025800930AA20206000000002B200C2744
-:104D500021040CBC11A6CC29C286280A087983023A
-:104D60006001B919DF1509B90A2992A36890082EC4
-:104D7000220009EE0C65E1A42EC28564E19E262086
-:104D80000713DF1E6E7B0260019A17DF151FDF1EFF
-:104D900019DF4BC0D228200A93E09DE1A9690F8852
-:104DA0000298E22F90802A9480B1FF07FF029FE3D0
-:104DB0002EC2851FDF080EDE0BAFBF2AF4CF2EC632
-:104DC00085655F76C020D10F2830102930112E3034
-:104DD0001300993200ED326480EE2A30141FDF3860
-:104DE00000AA3278EF050F9E092DE47F1EDF36669C
-:104DF000A0050F98092A8480B4A718DF33C76F0075
-:104E00009104AE9EDDE000AF1A00C31A6EE1052DDD
-:104E1000B2000DED0C1EDF2D08D81C063303AE8842
-:104E20002A848B2EB02E27848C03EE010FEE022EE7
-:104E3000B42E58018F63FEFF29310829250428303C
-:104E4000142E3109B0886480A32E240AC0812E302C
-:104E5000162CB4232E240BB4EF2F240C8C378B3656
-:104E6000292504DEB0DFC00C8F390B8E390FEE021E
-:104E700064EEC4089F1101C4048D380CB81800C436
-:104E8000040CBE1800EE110EDD02C0E30EFF021E80
-:104E9000DF019F719E701EDF008F2098739D740547
-:104EA000FF110BCD53C18098750FDD020EDD029D01
-:104EB000721EDEBF2A24662F629D2AE4A22FFC18F0
-:104EC0002F669D63FE710000002F30121BDF010072
-:104ED000FA3278FF050B980B2A847F66D0050B9A6F
-:104EE0000B2DA4802A301100AA3263FF442F240A1C
-:104EF0009E2B63FF56CC57DA20DB30DC4058071579
-:104F0000C020D10F00DA20C0B65807A463FFE50027
-:104F1000DA7058063AC0A02A246663FE02DA2058E6
-:104F2000079F63FFCFB16928200A862009094799A6
-:104F30001129240798107F812693E027E50A9AE338
-:104F400088109DE119DEDD8D11096F029FE42DE4CB
-:104F500016098802C0D398E22A240763FE51000094
-:104F60001DDEA60868118F11892B93E008FF02C08F
-:104F70008F9FE50D990299E2047F11C0D49DE1084D
-:104F8000FF029FE463FFD0006C1004C020D10F002B
-:104F90006C100485210D381114DE848622A42408A7
-:104FA000660C962205330B9321743B13C862D230F2
-:104FB000D10FC030BC29992199209322D230D10F32
-:104FC000233DF8932163FFE36C100AD62094181751
-:104FD000DE79D930B83898199914655252C0E1D2A7
-:104FE000E02E61021DDE760E0E4C65E1628F308E82
-:104FF000190F6F512FFCFD65F1558EE129D0230E5D
-:105000008F5077E66B8F181EDEB3B0FF0FF4110FD1
-:105010001F146590CE18DEB08C60A8CCC0B119DE2C
-:105020006428600B09CC0B0D880929811C28811A82
-:105030002A0A0009880C08BA381BDEA60CA90A291E
-:1050400092947B9B0260008C2B600C94160CBD111B
-:10505000A7DD29D286B8487983026000D219DE56CE
-:1050600009B80A2882A398176880026000A360002C
-:10507000A51ADE9A84180AEE01CA981BDE4D8C1917
-:105080002BB0008CC06EB3131DDE4A0C1C520DCC2D
-:105090000B2DC295C0A17EDBAE6000380C0C5360B6
-:1050A000000900000018DE8C8C60A8CCC0B119DEAD
-:1050B0004028600B09CC0B0D880929811C28811A16
-:1050C0002A0A0009880C08BA380CA90A2992947E89
-:1050D000930263FF72DA60C0BA580730645073609D
-:1050E000026600001ADE338C192AA0008CC06EA361
-:1050F0001A18DE2F0C1C5208CC0B18DE762BC2952A
-:10510000C0A178B30263FF3F63FFC9000C0C536377
-:10511000FF09896078991829D285C9922B729E1D42
-:10512000DE246EB8232DD226991369D00B60000DB2
-:10513000DA6058071A6000170088607D890A9A1A99
-:1051400029729D9C129915CF95DA60C0B658071345
-:105150006551F58D148C18DBD08DD0066A020D6D6B
-:1051600051580584D3A09A1464A1DD82A085A1B80A
-:10517000AF9F190505470202479518C05163FE60AD
-:105180002B6104C08C8931C0A009F950098A386E9E
-:10519000B81F2C6066A2CC0C0C472C64667CAB114B
-:1051A0009F119E1B8A155805958E1B8F11C0A02A32
-:1051B00064669F1164F0E12912032812096DF91742
-:1051C0002F810300908DAEFE0080889F9200908C0E
-:1051D000008088B89900908C65514E8A10851A8B92
-:1051E000301FDE06881229600708580A2C82942D89
-:1051F00061040ECC0C2C86946FDB3C1CDE30AC9C26
-:1052000029C0800B5D50A29909094729C48065D047
-:10521000DA2E600CC0D01FDDEF0CE811AFEEA788CE
-:105220002282852DE4CF02420B228685D2A0D10FA7
-:105230008E300E0E4763FDA6A29C0C0C472C640713
-:105240007AB6CD8B602E600A280AFF08E80C6481CC
-:105250000E18DE1983168213B33902330B2C341661
-:105260002D350AC02392319F30C020923308B202FC
-:1052700008E80292349832C0802864072B600CD270
-:10528000A01CDDD40CBE11A7EE2DE285ACBB28B46A
-:10529000CF0D9D0B2DE685D10F8B1888138D30B85F
-:1052A0008C0D8F470D4950B4990499100D0D5F0472
-:1052B000DD1009FF029F800DBB029B8165508D852B
-:1052C0001AB83AC0F1C0800CF83808084264806B04
-:1052D0001BDDB519DDB629B67E8D18B0DD6DDA059A
-:1052E00000A08800C08CC0A063FEF30082138B1660
-:1052F0001DDDC628600AC0E02EC4800D880202B2FF
-:105300000B99239F20C0D298229D2122600CB2BB12
-:105310000C2D11A7DD28D28508BB0B18DDAE2BD6CE
-:1053200085A8222E24CFD2A0D10F9E1B851A2A6CCD
-:10533000748B185BFF178E1B63FEA300C087C090A1
-:105340000AF93879809263FF86C020D10F9E1B2A0C
-:105350006C74C0B18D185805398E1B851A63FE7E9A
-:10536000886B8213891608BE110ECE0202920B9E24
-:1053700025B4991EDDA19F200E88029822C0EF045B
-:10538000D8110E88029824C0E49E21C080D2A02BA0
-:10539000600C2864071CDD8F0CBE11A7EE2DE28582
-:1053A000ACBB28B4CF0D9D0B2DE685D10F000000BE
-:1053B0006C1004C020D10F006C10048633C071C083
-:1053C00030600001B13300310400741A04620174CA
-:1053D00060F1D10F6C1004022A02033B025BFFF65E
-:1053E0001CDD771BDDBFC79F88B009A903098A01AF
-:1053F0009AB079801EC0F00FE4311DDD6E0002000E
-:105400002BD2821EDDB82AC1020EBB022BD6820A25
-:10541000E431D10F28C102C19009880208084F2841
-:10542000C50208E431D10F006C1004C0C00CE43197
-:1054300012DD631ADD6000020029A28218DDAC1BB8
-:10544000DDAA2621020B990108660129A6822625DC
-:105450000206E43114DDA715DDA2236A902326128B
-:105460008550242611252613222C40D10F00000040
-:105470006C1008D6102B0A64291AB41ADD4D0D23BE
-:10548000111CDD4E0F2511B81898130E551118DD9B
-:1054900099AC55A838AA332C80FF2A80FEA933285E
-:1054A0008D0129800108AA112880000CAA02088811
-:1054B0001109880208AA1C288C08281604580862BA
-:1054C00014DD3F0AA7022441162A30802B1204075C
-:1054D000AA2858085DB1338B13B4559A6004AC28E0
-:1054E000B4662C56277B69E016DD769412C050C056
-:1054F000D017DD329D15D370D4102F60802E6082BE
-:105500009F169E17881672891A8D128C402A607F0A
-:105510000DCC282B3A200CAA2858084BC0B10ABE43
-:10552000372E35408F1772F91A8D128C402A608100
-:105530000DCC282B3A200CAA28580843C0B10ABE2B
-:10554000372E3542B233B444B1556952B6B466C051
-:10555000508F15B877D370B2FF9F156EF899D10FA1
-:105560006C1004C021D10F006C1004270A001CDD50
-:10557000111FDD221EDD251DDD0E1ADD501BDD5E37
-:10558000C02824B0006D2A75AA48288080C0916484
-:10559000806100410415DD09C03125502E00361A06
-:1055A0000655010595390C56110C66082962966E50
-:1055B000974D0D590A29922468900812DD42024243
-:1055C0000872993B23629512DD06CB349F3002822C
-:1055D000020E4402C092993194329233AD52246249
-:1055E00095C090244C1024669524B0002924A0AACC
-:1055F00042292480B177B14404044224B400D10F7D
-:10560000D10FD10F6C10041ADCEA2AA00058021C3A
-:105610005BFFD5022A02033B025BFFD11BDCE8C91A
-:10562000A12CB102C0D40DCC020C0C4F2CB5020C35
-:10563000E431D10FC0A00AE43118DCDE0002002FF3
-:10564000828219DCF12EB10209FF022F86820EE45C
-:1056500031D10F006C1004C02002E43114DCD816E4
-:10566000DCD5000200226282234102732F0603E48C
-:1056700031C020D10F19DD221ADD212841020A2A6A
-:10568000010988012A668228450208E43115DD18DF
-:1056900012DD1D25461DD10F6C1004292006289C03
-:1056A000F96480A02A9CFD65A0968A288D262F0A81
-:1056B000087AD9042B221FC8BD2C206464C0812E17
-:1056C00022090EAE0C66E0782B200C1EDCBA0CBC56
-:1056D00011AECC28C28619DCB878F3026000AD099F
-:1056E000B90A2992A36890082E220009EE0C65E001
-:1056F0009B29C2851FDCC26490929F90C0E41FDC8E
-:10570000CE9E9128200AC0E09E930F88029892882E
-:10571000200F880298942F20079A979D962F950A1C
-:105720002E240728200629206468833328C2851286
-:10573000DCA9288C20A2B22E24CF28C685C020D177
-:105740000FC020D10F2A206A0111020A2A4165AF39
-:1057500052DA20C0B05805E464AFE5C021D10F0093
-:10576000649FC81FDC962D20168FF209DD0C00F116
-:105770000400DD1AADAD9D2912DC9728C285A2B2C6
-:105780002E24CF288C2028C685C020D10FC021D13F
-:105790000F0000006C1004260A001BDCDB15DC8700
-:1057A00028206517DC84288CFE6480940C4D110D34
-:1057B000BD082CD2F52BD2F42ED2F77CB13DB4BB70
-:1057C0002BD6F47BE9052BD2F62BD6F47CB92C2A08
-:1057D000D2F62AD6F52AD6F406E431000200287261
-:1057E000822AFAFF004104290A012F510200991A66
-:1057F0000A99030988012876820FE4312624652B53
-:10580000D2F48E5A2CD2F5B0EE9E5A7BCB1629D20A
-:10581000F62FD2F70CB80C09FF0C08FF0C0F2F1451
-:10582000C8F96000320BCA0C0A2A14CEA92B510207
-:10583000C0C20CBB020B0B4F2B55020BE431D10F36
-:1058400000DB30DA205BFF941BDCB064AF5D0C4DF5
-:1058500011ADBD63FFA8000006E4310002002F7205
-:105860008218DC6E2E510208FF022F76820EE43180
-:10587000D10F00006C1004C03003E43116DC4E156B
-:10588000DC4F00020024628274472118DCA0875A92
-:10589000084801286682CD7319DC9E0C2A11AA994A
-:1058A0002292832992847291038220CC292B510267
-:1058B0000BE431C020D10F001FDC972E51020FEEF8
-:1058C000012E55020EE431B02DB17C9C5A12DC92AF
-:1058D00008DD112D5619D10F6C10061BDC351EDCAE
-:1058E0003722B0001ADC8E6F23721DDC75C0481899
-:1058F000DC8D1FDC8BDC10D5C083F000808600506F
-:105900008A6D4A4F0F35110D34092440800B560A19
-:10591000296294B1330E55092251400F44110C44B1
-:105920000A874009A80C02883622514107883608A8
-:10593000770CA8992966949740296295874109A810
-:105940000C02883607883608770CA899296695973F
-:1059500041030342B13808084298F0D10F1CDC72B1
-:1059600013DC7327B0002332B5647057C091C0D0E8
-:1059700016DC7115DC6FC0402AC00003884328C4C0
-:10598000006D793C004104B14400971A7780148E71
-:10599000502FB2952DB695AFEE2EED2006EE369E29
-:1059A0005060001877A00983509D5023B695600081
-:1059B0000223B295223D2006223622B695B455B870
-:1059C000BBD10F0003884328C400D10F6C1004C062
-:1059D0004004E43115DC59000200885013DC58CB38
-:1059E000815BFFBD1CDC570C2D11ADCC2BC2822A74
-:1059F000C28394507BAB142EC28429C2850ABD0C8D
-:105A00000E990C0D990C0929146000050BA90C09BD
-:105A10002914993015DBEA2A51020AE4312A2CFCB8
-:105A200058004B2B32000AA2022BBCFF9B30CCB695
-:105A3000C8A4D2A0D10F000004E4311EDBDE0002B6
-:105A4000002DE2822FBAFF2C51020FDD012DE682DC
-:105A50000CE431D10F0000006C1004D10F000000E5
-:105A60006C1004C020D10F006C100413DC36C0D1C0
-:105A700003230923318DC0A06F340260008D19DB30
-:105A8000CD1BDBCE17DC2F0C2811A87726728325BF
-:105A900072822CFAFF76514788502E7285255C045D
-:105AA00025768275E9052572842576827659292E18
-:105AB00072842E76822E76830AE4310002002392CD
-:105AC000820021042FB10200D61A0C6603063301AE
-:105AD0002396820FE43126728325728260000200D1
-:105AE000D8A07659220AE4310002002392820021D4
-:105AF0000400D21A2FB1020C220302320122968234
-:105B00000FE431D280D10F00D280D10FC020D10F4D
-:105B10006C1004DB30862015DBA6280A002825023D
-:105B2000DA2028B0002CB00705880A28824C2D0AFC
-:105B3000010B8000DBA065AFE61ADB9F0A4A0A2949
-:105B4000A2A3C7BF769101D10F2BA6A3D10F00004E
-:105B50006C1004C0D1C7CF1BDB9919DB9617DB94FF
-:105B60000C2811A87786758574C0A076516288507C
-:105B70008E77B455957475E90385769574765927B3
-:105B80008F769F759F740AE431000200239282B4DD
-:105B90002E2FB10200E10400D61A0C660306330171
-:105BA0002396820FE431867583747639280AE431AE
-:105BB0000002002E9282B42200210424B10200DFF0
-:105BC0001A0CFF030FEE012E968204E431D280D12D
-:105BD0000FD8A07651D6D280D10F00006C100429C6
-:105BE0000A801EDB9A1FDB9A1CDB730C2B11ACBBEB
-:105BF0002C2CFC2DB2850FCC029ED19CD0C051C064
-:105C00007013DB9614DB9518DB932AB285A8280461
-:105C1000240A234691A986B8AA2AB685A98827848A
-:105C20009F25649FD10F00006C100419DBC70C2A5C
-:105C300011A9A98990C484798B761BDBB5ABAC2AFA
-:105C4000C2832CC2847AC1688AA02BBC30D3A064E2
-:105C5000A05E0B2B0A2CB2A319DB7F68C0071DDBEB
-:105C6000BBD30F7DC94AA929299D0129901F68919D
-:105C70003270A603D3A0CA9E689210C7AF2AB6A3FB
-:105C80002A2CFC5BFFB3D230D10F000013DBB10331
-:105C9000A3018C311DDB510C8C140DCC012CB6A34F
-:105CA00063FFDC00C020D10FDA205BFFCCC020D125
-:105CB0000FC020D10F0000006C1004DB30C0D019E1
-:105CC000DB3CDA2028300022300708481209880A15
-:105CD00028824CDC200B80001BDB370C4A11ABAA5E
-:105CE00029A28409290B29A684D10F006C1004C0B5
-:105CF0004118DB3017DB320C2611A727277030A89C
-:105D000066256286007104A35500441A7541482235
-:105D1000628415DB5202320BC922882117DB2F085F
-:105D20008414074401754905C834C020D10FD10F30
-:105D30000809471DDB86C0B28E201FDB1D0E0E43F7
-:105D4000AFEC2BC4A00FEE0A2DE6242A6284C020FB
-:105D50000A990B296684D10FC020D10F6C1004DB87
-:105D600030C0D018DB13DA20253000223007085865
-:105D70000A28824CDC200B80008931709E121BDBCC
-:105D80000D0C4A11ABAA29A28409290B29A684D19A
-:105D90000F09C95268532600910418DB08C0A12FCF
-:105DA000811200AA1A0AFF022F85121EDB020C4D77
-:105DB00011AEDD2CD2840C2C0B2CD684D10FC081DB
-:105DC0001FDAFFB89A0A0A472EF11200A1040088D0
-:105DD0001A08EE022EF5121DDAF70C4C11ADCC2B81
-:105DE000C2840B2B0B2BC684D10F00006C1004DB7C
-:105DF00030C0D019DAEFDA202830002230070988C5
-:105E00000A28824CDC200B80001CDAEA0C4B11AC17
-:105E1000BB2AB2840A2A0B2AB684D10F6C1004C0A4
-:105E20004118DAE416DAE60C2711A626266030A817
-:105E300072252286006104A35500441A7541082288
-:105E4000228402320BD10F00C020D10F6C10041538
-:105E5000DB410249142956112452120208430F88CB
-:105E600011C07300810400361A008104C78F0077C7
-:105E70001A087703074401064402245612D10F0082
-:105E80006C10066E23026000AC6420A7C0A08510D1
-:105E900013DB1916DB30C040A6AA2BA2AE0B1941AA
-:105EA00064906668915D68925268933C2AA2AA2821
-:105EB0003C7F288C7F0A0A4D2980012880002AAC6B
-:105EC000F20888110988027589462B3D0129B00026
-:105ED0002BB0010899110B99027A9934B8332A2A08
-:105EE00000B1447249B160004A7FBF0715DB1B63F4
-:105EF000FFB90000253AE863FFB10000253AE863E6
-:105F0000FFA90000250A6463FFA1C05A63FF9C003B
-:105F100000705F082534FF058C142C34FE70AF0B25
-:105F20000A8D142E3D012AE4012DE400DA405BFDC8
-:105F30005063FFA7D10FD10F6C10041ADAA019DA41
-:105F40009D1CDB061BDB07C080C07160000D0000DC
-:105F50000022A430B1AA299C107B915F26928679F9
-:105F6000C2156E6262C0206D080AB12200210400D1
-:105F7000741A764BDB63FFEE2292850D63110325C5
-:105F800014645FCFD650032D436DD9039820B422FB
-:105F90000644146D49229820982198229823982429
-:105FA00098259826982798289829982A982B982CED
-:105FB000982D982E982F222C4063FF971EDA7E276B
-:105FC000E68027E681D10F00C02063FF8300000038
-:105FD0006C1004C062C04112DA791ADA7513DAE182
-:105FE0002AA00023322D19DADB2BACFE2992AE6EEB
-:105FF000A30260008E090E402D1AC2C2CD0EDC39FC
-:106000002C251664B0895BFF9E15DAD71ADAD12BDE
-:106010003AE80A3A0158058C2B21160ABB28D3A06E
-:106020009B505805A32B52000ABB082A0A005805AA
-:10603000A215DACE2D21022C3AE80C3C2804DD0210
-:106040002D25029C5058059A8B50AABBC0A158051B
-:106050009A1CDAC72D21020C3C2806DD0213DAC592
-:106060002D25029C305805928B30AABBC0A2580542
-:10607000922A2102C0B40BAA020A0A4F2A2502580A
-:1060800005A6D10F242423C3CC2C251663FF76004C
-:1060900018DABD1CDAB919DABA1BDAB817DA8B8547
-:1060A000202E0AFD1FDAB92D202E24F47A24F47E46
-:1060B00024F4820EDD0124F4862E0AF70755280603
-:1060C000DD02C0750EDD01050506AB5BA959C0E810
-:1060D000AC5C24C4AB0EDD0227C4AC2E0ADFA8558D
-:1060E00027B4EC0EDD0124B4EBC2E027942C0EDDC6
-:1060F0000224942B2E0A800D0D4627546C24546BD9
-:106100000EDD022D242E63FEFC0000006C10042A1C
-:106110000A302B0A035BFF4D12DA8FC39029261633
-:10612000C3A1C0B3C08A2826175BFF48C03CC3B1D7
-:106130002B26161ADA222AA02023261764A079C358
-:10614000A2C0B15BFF42C3A2C0B15BFF40C3C22C7F
-:106150002616C2AFC0B12326175BFF3CC28F28268C
-:1061600016C0FE2F2617C2E22E26162A0AA1C0B19B
-:10617000C0D82D26175BFF352A0AA12A2616C3A6EA
-:10618000C0B3C1922926175BFF31C3C62C2616C1A6
-:10619000B32A0AA22B2617C0B35BFF2C290AA22917
-:1061A0002616C185282617C2FB2F2616C0E72E26E5
-:1061B000171DDA762D2610D10FC3A2C0B35BFF23C3
-:1061C00063FF82006C10041CDA3F1BDA2C18DA70B3
-:1061D00017DA7116DA7115DA71C0E0C0D414DA3B3F
-:1061E0001FD9F7C0288FF06D2A36DAC0D9C07C5B82
-:1061F000020FC90C1CDA350C9C28A8C3A6C22A368B
-:10620000802A2584A4C2A7CC2D248C2B248A2B245D
-:10621000872E248BB1BB2E369F2C369E2C369DB1FB
-:10622000AC1CDA161BDA5FC0286D2A33DAC0D9C07D
-:106230007C5B020FC90C1CDA240C9C28A8C3A6C2E4
-:106240002A36802B2584A4C2B1BBA7CC2D248C2E4A
-:10625000248B2A248A2E369F2C369E2C369DB1AC58
-:10626000C07919DA141BDA5113DA4F1ADA4F18DA37
-:106270005014DA1516DA5004F42812DA4F04660CBA
-:10628000040506A252A858AA5AA3539B3029A50078
-:1062900027848AC091C0A52A848C29848B17DA4868
-:1062A00018DA47A75726361D26361E2E361F16DA51
-:1062B0004513DA45A65504330C2826C82E75002D43
-:1062C00054AC2E54AB2E54AA2326E62326E52E26C4
-:1062D000E7D10F006C100613DA2317DA1E24723D83
-:1062E0002232937F2F0B6D08052832937F8F026334
-:1062F000FFF3C0C4C0B01AD9B1C051D94004593954
-:1063000029A4206E44020BB502C3281ED9ACDDB00F
-:1063100025E422052D392DE421C0501EDA2C19DA8E
-:106320001C18DA1C16DA1E1DDA2A94102A72451778
-:10633000D9E76DA94BD450B3557A5B17DF50756B15
-:10634000071FD99E8FF00F5F0C12D9DF02F228AE23
-:106350002222D681D54013D9DC746B0715D99885D4
-:106360005005450C035328B145A73FA832A9332255
-:10637000369D22369E2436802B369F2BF48B2CF4B0
-:106380008C14D9F824424DC030041414C84C6D0844
-:1063900006B133041414C84263FFF20015D985C452
-:1063A000400031041AD986C0D193A200DD1AC13849
-:1063B000B0DD9DA318D9EC2B824D29824E29A51C56
-:1063C0002882537A871E2C54008E106FE45D12D9F8
-:1063D0007B2F211D23211C2F251B04330C23251C5F
-:1063E00023251AD10FC06218D9DB88807E87D9890E
-:1063F000102654006F94191BD9712AB11C0A1A1463
-:1064000004AA0C2AB51C2AB51D2AB51A2AB51BD117
-:106410000F1BD96A2AB11C0A1A1403AA0C2AB51C2C
-:106420002AB51D2AB51A2AB51BD10F001CD9642B19
-:10643000C11D2DC11C2BC51B03DD0C2DC51C2DC57D
-:106440001AD10F006C100619D95D14D9C212D9C522
-:1064500015D9E0C73FC0E02E56A82E56A92E56AA41
-:106460002E56AB23262918D985DB101CD9DAC0D4C7
-:106470002A42452D16019C1000B0890A880C2896E6
-:10648000005BFF942B22E318D94D0B5B149B842AED
-:1064900022E48B84B1AA0A5A140BAA0C9A852922E9
-:1064A000E509591499862F22CD0F5F149F875BFF52
-:1064B000455BFF1623463BC1B01DD9401CD99E2A1F
-:1064C000D1022C463A0BAA020A0A4F2AD5025804D6
-:1064D000925BFEBF5BFE98C050C0B016D93614D98F
-:1064E0003E17D9AEC0C0C73E93122C262DC03060D7
-:1064F00000440000007F9F0FB155091914659FF4F7
-:10650000C0500AA9027FA7EF18D92ADA5008580A02
-:1065100028822C2B0A000B8000005104D2A0C091CD
-:10652000C7AF00991A0A99039912CE3864206BD329
-:10653000202B20072516032C12022A62827CA863D6
-:1065400018D91C01110208580A28822CDA500B8035
-:1065500000D2A0643FD58A310A8A1404AA01C82A4D
-:106560002B22010B8B1404BB017BA945DDA07A7B98
-:10657000081DD9122DD2000DAD0CDB3019D90D1A22
-:10658000D95288130ADA28DC801DD99009880A2894
-:10659000823C0DAA080B8000652F93D320C0B06306
-:1065A000FF9400007FAF34B1550050040A0919630D
-:1065B000FF42DAB07B7B081AD9012AA2000ABA0C82
-:1065C0001BD9428C310BAB280C8A141CD980ACBB74
-:1065D0001CD98004AA012BC68163FF8F645F60C051
-:1065E00050C0B0C7CE9C1263FF5500006C1004274A
-:1065F000221EC08008E4311BD8EF0002002AB282BC
-:1066000019D8EF003104C06100661A2991020A6AA4
-:10661000022AB68209E43115D94A0C3811A8532848
-:1066200032822432842A8CFC7841102921022A36B5
-:106630008297A0096902292502D10F002B21022C83
-:1066400032850B6B022CCCFC2C368297C02B25029A
-:10665000D10F00006C1004C0E71DD8D21CD8D40D97
-:106660004911D7208B228A200B4B0BD2A007A80CF4
-:106670009B72288CF4C8346F8E026000A31FD8CAA6
-:10668000A298AF7B78B334C93DC081C0F0028F3887
-:106690000F0F42C9FA2CD67ED5206D4A05003088EE
-:1066A00000508C887008980878B16DD2A09870D18D
-:1066B0000FC0F0038F387FE0DE63FFD8027B0CAFA2
-:1066C000BB0B990C643047D830C0F1C05002F5388C
-:1066D0000505426450792CD67E0B36122F6C100FB4
-:1066E0004F366DFA0500808800208C06440CC0816E
-:1066F000C05003B208237C0C038538050542645062
-:106700005A2CD67ED30F6D4A0500208800308CD2DB
-:10671000A0A798BC889870D10FD2A0BC799970D1ED
-:106720000FD2302BAD08C0F1C0500BF53805054233
-:10673000CB542CD67E083F14260A100F660C064652
-:10674000366D6A0500208800B08C827063FF2D00D2
-:10675000C05003F53875E08063FF7A00C0600286A0
-:106760003876E09F63FF9900C05003F53875E0C4A8
-:1067700063FFBE006C1004D62068520F695324DA00
-:1067800020DB30DC405800F3D2A0D10FDA20DB3020
-:10679000DC405800F09A2424240EC02122640FC04B
-:1067A00020D10F00B83BB04C2A2C7489242D200E28
-:1067B0002E200FA4DDB1EE2E240FB0DD2D240E28E7
-:1067C00090072D9003A488B088B1DD2D9403289400
-:1067D000075BFFA069511DC0E082242A600F18D812
-:1067E000FE2A240329600E8F2029240708FF029F18
-:1067F000209E64D10FC020D10F0000006C100494C3
-:106800002319D8F6C0B3083A110BAA02992019D857
-:10681000699A2116D867C05028929D2564A2288CB9
-:106820001828969DD10F00006C1004282066C038EF
-:10683000232406B788282466D10F00006C100603B5
-:106840005A0C0D36110D5C11D8208B2282210CBB05
-:106850000C06550F9B8202320B928113D853D9201C
-:10686000A38F6450561CD84FC0D71BD850A256C017
-:10687000E1C09004E93809094276F34F044302CAA3
-:10688000912BC67ED30F6DAA0500208800308C891D
-:1068900081A95909FA0C64A07D99818A8264A00FAC
-:1068A000D290D10FC06002E63876D0D763FFD10016
-:1068B000C020BC89998199809282D10F7F230429BD
-:1068C0002DF8998165BFD863FFE50000028F0CA306
-:1068D000FF0F3312931003AA0CD3406490402BC6D1
-:1068E0007E8610D30F6D6A0500208800308CBC8234
-:1068F000C090A4F3C041034938090942CA9B2BC682
-:106900007E6DAA0500208800308C0F590CA989BC27
-:1069100099998163FF8400BC89998163FF7C00C0E1
-:106920006002E63876D0B963FFB300C07002473822
-:1069300077D0CD63FFC700006C100414D82AC15271
-:10694000A424CA3128221D73811C292102659016B6
-:106950002A300075A912022A02033B022C3007C01C
-:10696000D25801D0653FDCD10F2B300703BB0B0B96
-:10697000BA0274B3022ABDF8D3A063FFC4000000BA
-:106980006C1004292006C0706E9741292102C08F27
-:106990002A2014C0B62B240606AA022A24147980C1
-:1069A000022725022A221E2C221D7AC10EC8ABDA2C
-:1069B00020DB302C0A00033D025BF80D6450742D7F
-:1069C00021020D0D4CC9D3C020D10F00002E9CFB1D
-:1069D00064E0822F21020F0F4C65F0911AD7F61C4C
-:1069E000D7F429A29EC08A798B5D2BC22668B00499
-:1069F0008D207BD95229A29DC0F364904A97901DA7
-:106A0000D8062E21049D9608EE110FEE029E979E49
-:106A10009118D802C0E527C4A22E24062BA29D2FD0
-:106A200021022BBC3008FF022F25022BA69DC0207F
-:106A3000D10F00002F300068F939DA20DB30044C28
-:106A40000258004463FF7700022A022B0A0658000E
-:106A5000D3220A00D10F6550102830006889240223
-:106A60002A02033B02DC4058003BC020D10FD27009
-:106A7000D10F00002A2C74033B02044C025BFEF58C
-:106A800063FF3B00DB30DC402A2C745BFEF2C0204D
-:106A9000D10F00006C1004C83F89268829A399995A
-:106AA0002609880C080848282525CC52C020D10F7B
-:106AB000DB402A2C745BF936D2A0D10F6C1004D8BD
-:106AC00020D73082220D451105220C928264207459
-:106AD00007420B13D7B5D420A383732302242DF8C8
-:106AE000858074514CBC82C0906D081600408800AF
-:106AF000708C773903D720C0918680743901D420F7
-:106B000074610263FFE2CA98C097C0411BD835C0C8
-:106B1000A00B8B0C0B4A380A0A42C9AA1DD7A21C2B
-:106B2000D7A32CD67EC140D30F6D4A050020880024
-:106B3000308C9780D270D10FBC8FC0E00F4E387E62
-:106B400090E263FFD6BC8292819280C0209282D173
-:106B50000F0000006C1006C0D71CD7921BD7940DF5
-:106B60004911D7202E221F28221D0E4E0BD280073E
-:106B70008A0C2E761F2AAC80C8346FAE026000CB20
-:106B80002F0A801AD798A29EAA7A7EA33FC93FC037
-:106B9000E1C05002E538050542CA552BC67EDB2010
-:106BA000D30F6D4A0500308800B08C2E721DAE9E4A
-:106BB0000EA50C645086D2802E761DC091298403C8
-:106BC000D10FC05003E53875D0D363FFCD15D785FD
-:106BD000027E0CA5EE643051C0A1250A0002A53842
-:106BE000033A020505426450922BC67E0E3512957B
-:106BF00010255C10054536D30F6D5A0500A088009E
-:106C0000208CC0A1A3E2C05023FA8003730C03A51B
-:106C100038AF730505426450722BC67E851005455A
-:106C20000C6D5A0500208800308CD280C0A10E9BCC
-:106C30000CAB7BAFBB2B761D2A8403D10FD280C057
-:106C4000C1AF7D2D761D2C8403D10F00D2302E8D47
-:106C500008C0F1C0500EF538050542CB592BC67E51
-:106C60000A3F14C1600F660C064636D30F6D6A05E5
-:106C700000208800E08C22721D63FF03C061C050B9
-:106C800003653875D80263FF6263FF5CC05002A5DC
-:106C90003875D08763FF8100C06003F63876D0BFB7
-:106CA00063FFB9006C10042A201529201614D7435D
-:106CB0000A990CCB9D2E200B04ED092BD11C8F289B
-:106CC00009BC36ACAA0CBB0C2BD51C0A0A472A24DB
-:106CD00015CAAF8B438942B0A800910400881AA856
-:106CE000FF0FBB029B278F260FB80C783B1AC020E2
-:106CF000D10F0000292102C0A20A9902292502C051
-:106D000021D10F008B2763FFDC2BD11C0CAA0C0AAE
-:106D10000A472A2415ACBB2BD51CC9AE8B438C2843
-:106D20008F42B0AD00F10400DD1AADCC0CBB029B6C
-:106D300027DA20B7EB580019C021D10F9F2763FF36
-:106D4000EF0000006C100428203C643047053060E0
-:106D500000073E01053EB156076539054928C77F42
-:106D6000A933030641076603B166060641A6337ED2
-:106D7000871E222125291AFC732B1502380C098144
-:106D80006000063E01023EB12406423903220AD1C8
-:106D90000FD230D10FC05163FFC000006C10042728
-:106DA000221EC08008E4311DD7030002002CD282CD
-:106DB0001BD703003104C06100661A2BB1020C6CB2
-:106DC000022CD6820BE43119D7870C3A11AA9328EA
-:106DD00032829780253282243284B45525368275DA
-:106DE000410A292102096902292502D10F2A21021B
-:106DF0002B32830A6A022B36822A2502D10F000029
-:106E00006C100418D6EC0C2711087708267286251A
-:106E10003C04765B1315D6E805220A2222A36820DB
-:106E200002742904227285D10FC020D10F00000006
-:106E30006C100419D6EB27221EC08009770208E4E3
-:106E4000311DD6DC0002002CD2821BD6DC003104BE
-:106E5000C06100661A2BB1020C6C022CD6820BE4C6
-:106E60003119D7600C3A11AA9328328297802532C3
-:106E700082243284B45525368275410B2A21020AB8
-:106E80006A022A2502D10F002B21022C32830B6BC0
-:106E9000022C36822B2502D10F0000006C10041B3F
-:106EA000D6C50C2A11ABAA29A286B438798B221B2D
-:106EB000D6C219D6E80B2B0A2BB2A309290868B051
-:106EC0000274B90D299D0129901F6E920822A28596
-:106ED000D10FC020D10FC892C020D10FDA205BEEB5
-:106EE000B3C020D10F0000006C100414D6B22842A9
-:106EF0009E19D6AF6F88026000BA29922668900763
-:106F00008A2009AA0C65A0AC2A429DC0DC64A0A41A
-:106F10002B200C19D6A90CBC11A4CC2EC28609B901
-:106F20000A7ED30260009A2992A36890078D2009F7
-:106F3000DD0C65D08C25C2856450862D2104C030BF
-:106F40006ED80D2C2066B8CC0C0C472C246665C07E
-:106F50007B1CD72518D6AF1AD6A619D6B61DD6AB28
-:106F6000C0E49E519D508F209357935599539A5644
-:106F70009A5408FF021AD6C29F5288269F5A9E59D9
-:106F80009D58935E9C5D935C9A5B08084805881148
-:106F9000985FC0D81FD6900CB911A499289285AFDC
-:106FA000BF23F4CF288C402896858E262D24069E5C
-:106FB00029C020D10FCA33DA20C0B65BFF78C72FB3
-:106FC000D10FC93ADA205BFF75C72FD10FDBD05B39
-:106FD000FE0B2324662B200C63FF7500C72FD10FF7
-:106FE000C72FD10F6C1004C85B29200668941C6859
-:106FF0009607C020D10FC020D10FDA20DB30DC4053
-:10700000DD502E0A005BFE5ED2A0D10F2E200C18A0
-:10701000D6690CEF11A8FF29F286C088798B791AFE
-:10702000D6660AEA0A2AA2A368A0048B207AB96865
-:1070300023F2856430621BD670290A802C206828D0
-:1070400020672D21040B881104DD1108DD020DCC11
-:1070500002C0842D4A100DCC021DD66898319D3097
-:107060008A2B99379C340BAA02C0C09C359C369A57
-:10707000322A2C74DB4028F285C0D3288C2028F6D5
-:10708000852C25042D24061FD653DD40AFEE2CE4BD
-:10709000CF5BFDEAD2A0D10F00DA20DBE05BFF3F3F
-:1070A000C020D10F6C100AD6302A200624160128E1
-:1070B000ACF86583862B2122C0F22A2124CC572AE2
-:1070C000AC010A0A4F2A25247ABB0260037F2C21D7
-:1070D000020C0C4C65C3192E22158D32C0910EDDA9
-:1070E0000C65D39088381ED63364836B8C37C0B858
-:1070F000C0960CB9399914B49A9A120D9911991332
-:107100008F6718D62EC9FB2880217F83168B142CFD
-:1071100022002A200C5BFF61D4A064A3B38F6760B8
-:10712000002800002B200C89120CBA11AEAA2CA248
-:10713000861DD6217C9B3E0DBD0A2DD2A368D004AE
-:1071400088207D893024A28564436427212E07F797
-:107150003607F90C6F9D01D7F0DA20DB70C1C42D22
-:10716000211F5BFEF889268827DDA009880C7A8B11
-:10717000179A10600006C04063FFCC0000DA208B35
-:10718000105BFEC88D1065A267C0E09E488C649CB1
-:10719000498B658A669B4A9A4B97458F677F730236
-:1071A000600120CD529D10DA20DB302C12015BFEF5
-:1071B000698D10C051D6A08FA7C0C08A68974D9A1C
-:1071C0004C8869896A984E994F8E6A8A69AE7E7733
-:1071D000EB01B1AA9E6A9A698B60C0A00B8E1477EE
-:1071E000B701C0A1C091C08493159D179516C0D05A
-:1071F00025203CC030085801089338C0820833105D
-:10720000085B010535400B9D3807DD100BAB100EF8
-:1072100019402A211F07991003DD020DBB020553F7
-:10722000100933020A55112921250A2A14092914A3
-:107230000499110A99020933028A2B2921040BAA05
-:10724000021BD66A0899110955020855020BAA02B9
-:107250009A408920881408991109880219D5EA1DD5
-:10726000D66409880298418B2A9346954783150D69
-:10727000BB0285168D179B448A658966AACAA97CBC
-:1072800077CB01B1AA07FB0C9C669A6588268E29EC
-:10729000AD87972607EE0C0E0E482E25259B672BF3
-:1072A000200C87131ED5C40CB911AE99289285A75E
-:1072B0008828968517D5C8C090A7BB29B4CF871852
-:1072C00063FE3C008C60C0E0C091C0F0C034C0B828
-:1072D0002A210428203C08AA110B8B0103830103F7
-:1072E0009F380B9B39C03208FF10038801089E3875
-:1072F0000C881407EE100FEE020388010898390578
-:10730000BF1029211F0ABB1107881008FF020BAA12
-:107310000218D5BC09291403AA022B212583200BAE
-:107320002B1404BB110833110FBB020B99028B14F1
-:107330008F2A0B33020833028B2B64708688689780
-:107340004D984C8769886A93419946974E984FC0EB
-:107350007077C701C0719A4718D6260B7C100CECC9
-:107360000208F802984418D6230CBC0208CC029CF0
-:10737000402A200C295CFEC0801FD58E1CD5960C9F
-:10738000AE112B2124ACAAAFEEB0BB8F132CE2853B
-:1073900028A4CFAFCC2CE6852A22152B2524B1AA10
-:1073A0002A26156490DBC9D28F262E22090DFF08EC
-:1073B0002F26060FEE0C0E0E482E25256550E4C034
-:1073C00020D10F00C07093419F4499469A4777C7D8
-:1073D0000A1CD57A2CC022C0810C87381CD6070B1A
-:1073E000781008E80208B8020C8802984063FF8011
-:1073F00000CC57DA20DB608C115BFDD629210268B6
-:107400009806689403C020D10F2B221EC0A0292209
-:107410001D2A25027B9901C0B064BFE813D5652CF5
-:10742000B00728B000DA2003880A28824CC0D10BAC
-:107430008000DBA065AFE763FFCA000068A779DAC8
-:1074400020DB30DC40DD505BFEE7D2A0D10FC16D08
-:10745000C19D29252C60000429252CD6902624675F
-:107460002F2468DA20DB308C11DD502E0A805BFD82
-:1074700044D2A0D10FC168C1A82A252C63FFDD002A
-:107480000000C8DF8C268B29ADCC9C260CBB0C0BD6
-:107490000B482B25252A2C74DB602C12015BFD8701
-:1074A000D2A0D10F2A2C748B115BF6B9D2A0D10FC8
-:1074B000DA205BFE3A63FF3800DA20C0B15BFE8A57
-:1074C00064ABF1655F352D2124B1DD2D252463FFEB
-:1074D0001FDA202B200C5BFE5663FF1412D5C882E6
-:1074E00020028257C82163FFFC12D5C403E8300490
-:1074F000EE3005B13093209421952263FFFC00000B
-:1075000010D5C0910092019302940311D597821077
-:1075100001EA30A21101F031C04004E4160002007B
-:1075200011D5B98210234A00032202921011D5828C
-:10753000C021921004E4318403830282018100009F
-:10754000D23001230000000010D5B09100920193C9
-:1075500002940311D586821001EA30A21101F131A3
-:10756000C04004E41600020011D5A7821013D52BE9
-:10757000032202921004E431840383028201810019
-:1075800000D330013300000010D5A19100810165C6
-:10759000104981026510448103CF1F920193029428
-:1075A0000311D574821001EA30A21101F231C040FA
-:1075B00004E41600020011D593821013D5130322A0
-:1075C00002921004E431840383028201C01091030B
-:1075D00091029101810000D43001430012D542C0D4
-:1075E0003028374028374428374828374C233D0176
-:1075F0007233ED03020063FFFC00000010D585919B
-:107600000092019302940311D5838210921011D538
-:10761000348310032202921011D58012D5469210A5
-:10762000C04004E41600020011D577821013D52D56
-:10763000032202921004E431840383028201810058
-:1076400000D53001530000006C10026E322FD6209E
-:10765000056F04043F04745B2A05440C00410400D8
-:10766000331A220A006D490D73630403660CB122BC
-:107670000F2211031314736302222C01D10FC83B94
-:10768000D10F000073630CC021D10F000000000077
-:1076900044495630C020D10F6C10020040046B4C9E
-:1076A00007032318020219D10F020319C020D10FBA
-:1076B0006C100202EA30D10F6C1002CC2503F031BD
-:1076C00060000F006F220503F1316000056F230594
-:1076D00003F231000200D10F6C1002CC2502F03011
-:1076E000D10F00006F220402F130D10F6F2304028A
-:1076F000F230D10FC020D10F6C1002220A20230AD1
-:10770000006D280E28374028374428374828374C42
-:10771000233D01030200D10F6C100202E431D10FAE
-:107720000A004368656C73696F20465720444542E0
-:1077300055473D3020284275696C74204672692097
-:107740004D61792020382031363A30373A333620AF
-:107750005044542032303039206F6E20636C656F96
-:1077600070617472612E6173696364657369676EB9
-:107770006572732E636F6D3A2F686F6D652F666546
-:107780006C69782F772F66775F372E31292C20563A
-:10779000657273696F6E2054337878203030372EDD
-:1077A00030342E3030202D203130303730343030EE
-:0877B000100704000071489469
-:00000001FF
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 7f437ca..b840a49 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
+#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
+#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM5482 0x0143bcb0
+#define PHY_ID_BCM5411 0x00206070
+#define PHY_ID_BCM5421 0x002060e0
+#define PHY_ID_BCM5464 0x002060b0
+#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM57780 0x03625d90
#define PHY_BCM_OUI_MASK 0xfffffc00
diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h
index 2a61eb1..d9cb19b 100644
--- a/include/linux/caif/caif_socket.h
+++ b/include/linux/caif/caif_socket.h
@@ -62,6 +62,7 @@ enum caif_channel_priority {
* @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing.
* @CAIFPROTO_UTIL: Utility (Psock) channel.
* @CAIFPROTO_RFM: Remote File Manager
+ * @CAIFPROTO_DEBUG: Debug link
*
* This enum defines the CAIF Channel type to be used. This defines
* the service to connect to on the modem.
@@ -72,6 +73,7 @@ enum caif_protocol_type {
CAIFPROTO_DATAGRAM_LOOP,
CAIFPROTO_UTIL,
CAIFPROTO_RFM,
+ CAIFPROTO_DEBUG,
_CAIFPROTO_MAX
};
#define CAIFPROTO_MAX _CAIFPROTO_MAX
@@ -83,6 +85,28 @@ enum caif_protocol_type {
enum caif_at_type {
CAIF_ATTYPE_PLAIN = 2
};
+ /**
+ * enum caif_debug_type - Content selection for debug connection
+ * @CAIF_DEBUG_TRACE_INTERACTIVE: Connection will contain
+ * both trace and interactive debug.
+ * @CAIF_DEBUG_TRACE: Connection contains trace only.
+ * @CAIF_DEBUG_INTERACTIVE: Connection to interactive debug.
+ */
+enum caif_debug_type {
+ CAIF_DEBUG_TRACE_INTERACTIVE = 0,
+ CAIF_DEBUG_TRACE,
+ CAIF_DEBUG_INTERACTIVE,
+};
+
+/**
+ * enum caif_debug_service - Debug Service Endpoint
+ * @CAIF_RADIO_DEBUG_SERVICE: Debug service on the Radio sub-system
+ * @CAIF_APP_DEBUG_SERVICE: Debug for the applications sub-system
+ */
+enum caif_debug_service {
+ CAIF_RADIO_DEBUG_SERVICE = 1,
+ CAIF_APP_DEBUG_SERVICE
+};
/**
* struct sockaddr_caif - the sockaddr structure for CAIF sockets.
@@ -109,6 +133,12 @@ enum caif_at_type {
*
* @u.rfm.volume: Volume to mount.
*
+ * @u.dbg: Applies when family = CAIFPROTO_DEBUG.
+ *
+ * @u.dbg.type: Type of debug connection to set up
+ * (caif_debug_type).
+ *
+ * @u.dbg.service: Service sub-system to connect (caif_debug_service).
* Description:
* This structure holds the connect parameters used for setting up a
* CAIF Channel. It defines the service to connect to on the modem.
@@ -130,6 +160,10 @@ struct sockaddr_caif {
__u32 connection_id;
char volume[16];
} rfm; /* CAIFPROTO_RFM */
+ struct {
+ __u8 type; /* type:enum caif_debug_type */
+ __u8 service; /* service:caif_debug_service */
+ } dbg; /* CAIFPROTO_DEBUG */
} u;
};
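
A minimal user-space sketch of how the new dbg member might be used to open a CAIF
debug channel. Only the address union and the CAIFPROTO_DEBUG/caif_debug_* values come
from the hunk above; the AF_CAIF/SOCK_SEQPACKET socket setup, the family field
assignment, the AF_CAIF fallback value and the helper name are assumptions based on the
usual CAIF socket usage and are not part of this patch.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

#ifndef AF_CAIF
#define AF_CAIF 37	/* assumed value from linux/socket.h of this era */
#endif

static int open_caif_debug_channel(void)
{
	struct sockaddr_caif addr;
	int fd;

	fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DEBUG);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	addr.u.dbg.type = CAIF_DEBUG_TRACE_INTERACTIVE;	/* trace + interactive */
	addr.u.dbg.service = CAIF_RADIO_DEBUG_SERVICE;	/* modem sub-system */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}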
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index a55c873..c4627cb 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -30,6 +30,7 @@
#define PCI_EEPROM_WIDTH_93C46 6
#define PCI_EEPROM_WIDTH_93C56 8
#define PCI_EEPROM_WIDTH_93C66 8
+#define PCI_EEPROM_WIDTH_93C86 8
#define PCI_EEPROM_WIDTH_OPCODE 3
#define PCI_EEPROM_WRITE_OPCODE 0x05
#define PCI_EEPROM_READ_OPCODE 0x06
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 2c8af09..c1be61f 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -384,6 +384,15 @@ struct ethtool_rxnfc {
__u32 rule_locs[0];
};
+struct ethtool_rxfh_indir {
+ __u32 cmd;
+ /* On entry, this is the array size of the user buffer. On
+ * return from ETHTOOL_GRXFHINDIR, this is the array size of
+ * the hardware indirection table. */
+ __u32 size;
+ __u32 ring_index[0]; /* ring/queue index for each hash value */
+};
+
struct ethtool_rx_ntuple_flow_spec {
__u32 flow_type;
union {
@@ -457,7 +466,7 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data);
u32 ethtool_op_get_ufo(struct net_device *dev);
int ethtool_op_set_ufo(struct net_device *dev, u32 data);
u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data);
+int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
void ethtool_ntuple_flush(struct net_device *dev);
/**
@@ -576,6 +585,10 @@ struct ethtool_ops {
int (*set_rx_ntuple)(struct net_device *,
struct ethtool_rx_ntuple *);
int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
+ int (*get_rxfh_indir)(struct net_device *,
+ struct ethtool_rxfh_indir *);
+ int (*set_rxfh_indir)(struct net_device *,
+ const struct ethtool_rxfh_indir *);
};
#endif /* __KERNEL__ */
@@ -637,6 +650,8 @@ struct ethtool_ops {
#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */
+#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */
+#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
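
A sketch (not from the patch) of fetching the RX flow hash indirection table from user
space with the new ETHTOOL_GRXFHINDIR command, following the two-step pattern suggested
by the struct comment: .size is the user buffer size on entry and the hardware table
size on return. Whether every driver accepts a zero-sized probe call is an assumption,
as is the helper name; fd is any AF_INET datagram socket and the updated ethtool.h must
be installed for the new symbols.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static struct ethtool_rxfh_indir *get_rxfh_indir(int fd, const char *ifname)
{
	struct ethtool_rxfh_indir probe = { .cmd = ETHTOOL_GRXFHINDIR, .size = 0 };
	struct ethtool_rxfh_indir *indir;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* First call: size = 0, kernel reports the hardware table size. */
	ifr.ifr_data = (void *)&probe;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return NULL;

	/* Second call: allocate room for 'size' ring indices and fetch them. */
	indir = malloc(sizeof(*indir) + probe.size * sizeof(__u32));
	if (!indir)
		return NULL;
	indir->cmd = ETHTOOL_GRXFHINDIR;
	indir->size = probe.size;
	ifr.ifr_data = (void *)indir;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		free(indir);
		return NULL;
	}
	return indir;
}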
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 151f5d7..69b43db 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -91,6 +91,54 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define BPF_TAX 0x00
#define BPF_TXA 0x80
+enum {
+ BPF_S_RET_K = 0,
+ BPF_S_RET_A,
+ BPF_S_ALU_ADD_K,
+ BPF_S_ALU_ADD_X,
+ BPF_S_ALU_SUB_K,
+ BPF_S_ALU_SUB_X,
+ BPF_S_ALU_MUL_K,
+ BPF_S_ALU_MUL_X,
+ BPF_S_ALU_DIV_X,
+ BPF_S_ALU_AND_K,
+ BPF_S_ALU_AND_X,
+ BPF_S_ALU_OR_K,
+ BPF_S_ALU_OR_X,
+ BPF_S_ALU_LSH_K,
+ BPF_S_ALU_LSH_X,
+ BPF_S_ALU_RSH_K,
+ BPF_S_ALU_RSH_X,
+ BPF_S_ALU_NEG,
+ BPF_S_LD_W_ABS,
+ BPF_S_LD_H_ABS,
+ BPF_S_LD_B_ABS,
+ BPF_S_LD_W_LEN,
+ BPF_S_LD_W_IND,
+ BPF_S_LD_H_IND,
+ BPF_S_LD_B_IND,
+ BPF_S_LD_IMM,
+ BPF_S_LDX_W_LEN,
+ BPF_S_LDX_B_MSH,
+ BPF_S_LDX_IMM,
+ BPF_S_MISC_TAX,
+ BPF_S_MISC_TXA,
+ BPF_S_ALU_DIV_K,
+ BPF_S_LD_MEM,
+ BPF_S_LDX_MEM,
+ BPF_S_ST,
+ BPF_S_STX,
+ BPF_S_JMP_JA,
+ BPF_S_JMP_JEQ_K,
+ BPF_S_JMP_JEQ_X,
+ BPF_S_JMP_JGE_K,
+ BPF_S_JMP_JGE_X,
+ BPF_S_JMP_JGT_K,
+ BPF_S_JMP_JGT_X,
+ BPF_S_JMP_JSET_K,
+ BPF_S_JMP_JSET_X,
+};
+
#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif
diff --git a/include/linux/if.h b/include/linux/if.h
index be350e6..53558ec 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -73,6 +73,8 @@
#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
#define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */
#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
+#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */
+#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index c26a0e4..e24ce6e 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/netlink.h>
+#include <linux/u64_stats_sync.h>
#if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE)
struct socket *macvtap_get_socket(struct file *);
@@ -27,14 +28,16 @@ struct macvtap_queue;
* struct macvlan_rx_stats - MACVLAN percpu rx stats
* @rx_packets: number of received packets
* @rx_bytes: number of received bytes
- * @multicast: number of received multicast packets
+ * @rx_multicast: number of received multicast packets
+ * @syncp: synchronization point for 64bit counters
* @rx_errors: number of errors
*/
struct macvlan_rx_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long multicast;
- unsigned long rx_errors;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_multicast;
+ struct u64_stats_sync syncp;
+ unsigned long rx_errors;
};
struct macvlan_dev {
@@ -56,12 +59,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
{
struct macvlan_rx_stats *rx_stats;
- rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
+ rx_stats = this_cpu_ptr(vlan->rx_stats);
if (likely(success)) {
+ u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += len;
if (multicast)
- rx_stats->multicast++;
+ rx_stats->rx_multicast++;
+ u64_stats_update_end(&rx_stats->syncp);
} else {
rx_stats->rx_errors++;
}
diff --git a/include/linux/in.h b/include/linux/in.h
index 583c76f..41d88a4 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -85,6 +85,7 @@ struct in_addr {
#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
#define IP_MINTTL 21
+#define IP_NODEFRAG 22
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
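
A hypothetical sketch of enabling the new IP_NODEFRAG option. The hunk above only adds
the option number; the sketch assumes it is toggled like the neighbouring boolean
IP-level options via setsockopt() at the IPPROTO_IP level, and any socket-type
restrictions are not shown in this hunk. The helper name is illustrative.

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_NODEFRAG
#define IP_NODEFRAG 22	/* value from the hunk above */
#endif

static int set_nodefrag(int fd)
{
	int one = 1;

	/* Ask the stack not to reassemble fragments for this socket. */
	return setsockopt(fd, IPPROTO_IP, IP_NODEFRAG, &one, sizeof(one));
}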
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4fbccc5..8fa5e5a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -673,7 +673,7 @@ struct netdev_rx_queue {
* 1. Define @ndo_get_stats64 to update a rtnl_link_stats64 structure
* (which should normally be dev->stats64) and return a pointer to
* it. The structure must not be changed asynchronously.
- * 2. Define @ndo_get_stats to update a net_device_stats64 structure
+ * 2. Define @ndo_get_stats to update a net_device_stats structure
* (which should normally be dev->stats) and return a pointer to
* it. The structure may be changed asynchronously only if each
* field is written atomically.
@@ -744,6 +744,8 @@ struct net_device_ops {
unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
+ int (*ndo_netpoll_setup)(struct net_device *dev,
+ struct netpoll_info *info);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -863,7 +865,8 @@ struct net_device {
#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
+ NETIF_F_TSO6 | NETIF_F_UFO)
#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
@@ -977,6 +980,7 @@ struct net_device {
struct netdev_queue rx_queue;
rx_handler_func_t *rx_handler;
+ void *rx_handler_data;
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
@@ -1044,10 +1048,6 @@ struct net_device {
/* mid-layer private */
void *ml_priv;
- /* bridge stuff */
- struct net_bridge_port *br_port;
- /* macvlan */
- struct macvlan_port *macvlan_port;
/* GARP */
struct garp_port *garp_port;
@@ -1710,7 +1710,8 @@ static inline void napi_free_frags(struct napi_struct *napi)
}
extern int netdev_rx_handler_register(struct net_device *dev,
- rx_handler_func_t *rx_handler);
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
extern void netif_nit_deliver(struct sk_buff *skb);
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e9e2312..413742c 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
+int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
+void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
@@ -57,12 +59,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
static inline bool netpoll_rx(struct sk_buff *skb)
{
- struct netpoll_info *npinfo = skb->dev->npinfo;
+ struct netpoll_info *npinfo;
unsigned long flags;
bool ret = false;
+ rcu_read_lock_bh();
+ npinfo = rcu_dereference_bh(skb->dev->npinfo);
+
if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
- return false;
+ goto out;
spin_lock_irqsave(&npinfo->rx_lock, flags);
/* check rx_flags again with the lock held */
@@ -70,12 +75,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
ret = true;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+out:
+ rcu_read_unlock_bh();
return ret;
}
static inline int netpoll_rx_on(struct sk_buff *skb)
{
- struct netpoll_info *npinfo = skb->dev->npinfo;
+ struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}
@@ -91,7 +98,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
struct net_device *dev = napi->dev;
- rcu_read_lock(); /* deal with race on ->npinfo */
if (dev && dev->npinfo) {
spin_lock(&napi->poll_lock);
napi->poll_owner = smp_processor_id();
@@ -108,7 +114,11 @@ static inline void netpoll_poll_unlock(void *have)
napi->poll_owner = -1;
spin_unlock(&napi->poll_lock);
}
- rcu_read_unlock();
+}
+
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+ return irqs_disabled();
}
#else
@@ -134,6 +144,10 @@ static inline void netpoll_poll_unlock(void *have)
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 64fb32b..2c87016 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -725,6 +725,12 @@ enum nl80211_commands {
* @NL80211_ATTR_AP_ISOLATE: (AP mode) Do not forward traffic between stations
* connected to this BSS.
*
+ * @NL80211_ATTR_WIPHY_TX_POWER_SETTING: Transmit power setting type. See
+ * &enum nl80211_tx_power_setting for possible values.
+ * @NL80211_ATTR_WIPHY_TX_POWER_LEVEL: Transmit power level in signed mBm units.
+ * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING
+ * for non-automatic settings.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -882,6 +888,9 @@ enum nl80211_attrs {
NL80211_ATTR_AP_ISOLATE,
+ NL80211_ATTR_WIPHY_TX_POWER_SETTING,
+ NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -1659,4 +1668,17 @@ enum nl80211_cqm_rssi_threshold_event {
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
};
+
+/**
+ * enum nl80211_tx_power_setting - TX power adjustment
+ * @NL80211_TX_POWER_AUTOMATIC: automatically determine transmit power
+ * @NL80211_TX_POWER_LIMITED: limit TX power by the mBm parameter
+ * @NL80211_TX_POWER_FIXED: fix TX power to the mBm parameter
+ */
+enum nl80211_tx_power_setting {
+ NL80211_TX_POWER_AUTOMATIC,
+ NL80211_TX_POWER_LIMITED,
+ NL80211_TX_POWER_FIXED,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 122d083..ac74ee0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1414,12 +1414,14 @@ static inline int skb_network_offset(const struct sk_buff *skb)
*
* Various parts of the networking layer expect at least 32 bytes of
* headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce the average number of cache lines per packet.
+ * get_rps_cpus() for example only accesses one 64-byte aligned block :
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 64
+#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
#endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
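
To make the sizing in the comment above concrete: the block get_rps_cpus() touches is
NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20 or 40) + ports(8), i.e. 44 bytes
for IPv4 or 64 bytes for IPv6. On a machine with 64-byte cache lines,
max(32, L1_CACHE_BYTES) still evaluates to 64, so that span fits in a single aligned
cache line, while systems with smaller cache lines now reserve only 32 bytes of padding
instead of a fixed 64.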
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 032a19e..a2fada9 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -24,6 +24,9 @@ struct __kernel_sockaddr_storage {
#include <linux/types.h> /* pid_t */
#include <linux/compiler.h> /* __user */
+struct pid;
+struct cred;
+
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -309,6 +312,8 @@ struct ucred {
#define IPX_TYPE 1
#ifdef __KERNEL__
+extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
+
extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
new file mode 100644
index 0000000..fa261a0
--- /dev/null
+++ b/include/linux/u64_stats_sync.h
@@ -0,0 +1,140 @@
+#ifndef _LINUX_U64_STATS_SYNC_H
+#define _LINUX_U64_STATS_SYNC_H
+
+/*
+ * To properly implement 64bits network statistics on 32bit and 64bit hosts,
+ * we provide a synchronization point, that is a noop on 64bit or UP kernels.
+ *
+ * Key points :
+ * 1) Use a seqcount on SMP 32bits, with low overhead.
+ * 2) Whole thing is a noop on 64bit arches or UP kernels.
+ * 3) Write side must ensure mutual exclusion or one seqcount update could
+ * be lost, thus blocking readers forever.
+ * If this synchronization point is not a mutex, but a spinlock or
+ * spinlock_bh() or disable_bh() :
+ * 3.1) Write side should not sleep.
+ * 3.2) Write side should not allow preemption.
+ * 3.3) If applicable, interrupts should be disabled.
+ *
+ * 4) If reader fetches several counters, there is no guarantee the whole values
+ * are consistent (remember point 1: this is a noop on 64bit arches anyway).
+ *
+ * 5) readers are allowed to sleep or be preempted/interrupted : They perform
+ * pure reads. But if they have to fetch many values, it's better to not allow
+ * preemptions/interruptions to avoid many retries.
+ *
+ * 6) If counter might be written by an interrupt, readers should block interrupts.
+ * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
+ * read partial values)
+ *
+ * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
+ * u64_stats_fetch_retry_bh() helpers
+ *
+ * Usage :
+ *
+ * Stats producer (writer) should use following template granted it already got
+ * an exclusive access to counters (a lock is already taken, or per cpu
+ * data is used [in a non preemptable context])
+ *
+ * spin_lock_bh(...) or other synchronization to get exclusive access
+ * ...
+ * u64_stats_update_begin(&stats->syncp);
+ * stats->bytes64 += len; // non atomic operation
+ * stats->packets64++; // non atomic operation
+ * u64_stats_update_end(&stats->syncp);
+ *
+ * While a consumer (reader) should use following template to get consistent
+ * snapshot for each variable (but no guarantee on several ones)
+ *
+ * u64 tbytes, tpackets;
+ * unsigned int start;
+ *
+ * do {
+ * start = u64_stats_fetch_begin(&stats->syncp);
+ * tbytes = stats->bytes64; // non atomic operation
+ * tpackets = stats->packets64; // non atomic operation
+ * } while (u64_stats_fetch_retry(&stats->syncp, start));
+ *
+ *
+ * Example of use in drivers/net/loopback.c, using per_cpu containers,
+ * in BH disabled context.
+ */
+#include <linux/seqlock.h>
+
+struct u64_stats_sync {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ seqcount_t seq;
+#endif
+};
+
+static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_begin(&syncp->seq);
+#endif
+}
+
+static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_end(&syncp->seq);
+#endif
+}
+
+static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ preempt_disable();
+#endif
+ return 0;
+#endif
+}
+
+static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+ preempt_enable();
+#endif
+ return false;
+#endif
+}
+
+/*
+ * In case softirq handlers can update u64 counters, readers can use following helpers
+ * - SMP 32bit arches use seqcount protection, irq safe.
+ * - UP 32bit must disable BH.
+ * - 64bit have no problem atomically reading u64 values, irq safe.
+ */
+static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ local_bh_disable();
+#endif
+ return 0;
+#endif
+}
+
+static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+ local_bh_enable();
+#endif
+ return false;
+#endif
+}
+
+#endif /* _LINUX_U64_STATS_SYNC_H */
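
A minimal sketch of the writer/reader templates documented in the header above, e.g.
for a driver's per-cpu RX counters. The struct and function names here are illustrative
and not from the patch; the writer is assumed to already hold exclusive access (lock,
BH-disabled per-cpu data, ...), and the reader loops until it sees a consistent
snapshot of each counter.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_rx_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

static void demo_count_rx(struct demo_rx_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;		/* non-atomic, protected by syncp */
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void demo_read_rx(const struct demo_rx_stats *stats,
			 u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->rx_packets;
		*bytes = stats->rx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}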
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index cc4f453..8178156 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -36,6 +36,9 @@ static inline void put_user_ns(struct user_namespace *ns)
kref_put(&ns->kref, free_user_ns);
}
+uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid);
+gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid);
+
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -52,6 +55,17 @@ static inline void put_user_ns(struct user_namespace *ns)
{
}
+static inline uid_t user_ns_map_uid(struct user_namespace *to,
+ const struct cred *cred, uid_t uid)
+{
+ return uid;
+}
+static inline gid_t user_ns_map_gid(struct user_namespace *to,
+ const struct cred *cred, gid_t gid)
+{
+ return gid;
+}
+
#endif
#endif /* _LINUX_USER_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 20725e2..90c9e28 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -23,7 +23,8 @@ struct unix_address {
};
struct unix_skb_parms {
- struct ucred creds; /* Skb credentials */
+ struct pid *pid; /* Skb credentials */
+ const struct cred *cred;
struct scm_fp_list *fp; /* Passed files */
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Security ID */
@@ -31,7 +32,6 @@ struct unix_skb_parms {
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
#define UNIXSID(skb) (&UNIXCB((skb)).secid)
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 318ab94..6da573c 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -50,6 +50,9 @@ struct caif_connect_request {
* @client_layer: User implementation of client layer. This layer
* MUST have receive and control callback functions
* implemented.
+ * @ifindex: Link layer interface index used for this connection.
+ * @headroom: Head room needed by CAIF protocol.
+ * @tailroom: Tail room needed by CAIF protocol.
*
* This function connects a CAIF channel. The Client must implement
* the struct cflayer. This layer represents the Client layer and holds
@@ -59,8 +62,9 @@ struct caif_connect_request {
* E.g. CAIF Socket will call this function for each socket it connects
* and have one client_layer instance for each socket.
*/
-int caif_connect_client(struct caif_connect_request *config,
- struct cflayer *client_layer);
+int caif_connect_client(struct caif_connect_request *conn_req,
+ struct cflayer *client_layer, int *ifindex,
+ int *headroom, int *tailroom);
/**
* caif_disconnect_client - Disconnects a client from the CAIF stack.
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 25c472f..c8b07a9 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -15,14 +15,8 @@ struct cfpktq;
struct caif_payload_info;
struct caif_packet_funcs;
-#define CAIF_MAX_FRAMESIZE 4096
-#define CAIF_MAX_PAYLOAD_SIZE (4096 - 64)
-#define CAIF_NEEDED_HEADROOM (10)
-#define CAIF_NEEDED_TAILROOM (2)
#define CAIF_LAYER_NAME_SZ 16
-#define CAIF_SUCCESS 1
-#define CAIF_FAILURE 0
/**
* caif_assert() - Assert function for CAIF.
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
new file mode 100644
index 0000000..ce4570d
--- /dev/null
+++ b/include/net/caif/caif_spi.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CAIF_SPI_H_
+#define CAIF_SPI_H_
+
+#include <net/caif/caif_device.h>
+
+#define SPI_CMD_WR 0x00
+#define SPI_CMD_RD 0x01
+#define SPI_CMD_EOT 0x02
+#define SPI_CMD_IND 0x04
+
+#define SPI_DMA_BUF_LEN 8192
+
+#define WL_SZ 2 /* 16 bits. */
+#define SPI_CMD_SZ 4 /* 32 bits. */
+#define SPI_IND_SZ 4 /* 32 bits. */
+
+#define SPI_XFER 0
+#define SPI_SS_ON 1
+#define SPI_SS_OFF 2
+#define SPI_TERMINATE 3
+
+/* Minimum time between different levels is 50 microseconds. */
+#define MIN_TRANSITION_TIME_USEC 50
+
+/* Defines for calculating duration of SPI transfers for a particular
+ * number of bytes.
+ */
+#define SPI_MASTER_CLK_MHZ 13
+#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk)
+
+/* Normally this should be aligned with the modem in order to benefit from full
+ * duplex transfers. However, a size of 8188 provokes errors when running with
+ * the modem. These errors occur when packet sizes approach 4 kB of data.
+ */
+#define CAIF_MAX_SPI_FRAME 4092
+
+/* Maximum number of uplink CAIF frames that can reside in the same SPI frame.
+ * This number should correspond with the modem setting. The application side
+ * CAIF accepts any number of embedded downlink CAIF frames.
+ */
+#define CAIF_MAX_SPI_PKTS 9
+
+/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier
+ * debugging. Both TX and RX buffers will be filled before the transfer.
+ */
+#define CFSPI_DBG_PREFILL 0
+
+/* Structure describing a SPI transfer. */
+struct cfspi_xfer {
+ u16 tx_dma_len;
+ u16 rx_dma_len;
+ void *va_tx;
+ dma_addr_t pa_tx;
+ void *va_rx;
+ dma_addr_t pa_rx;
+};
+
+/* Structure implemented by the SPI interface. */
+struct cfspi_ifc {
+ void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
+ void (*xfer_done_cb) (struct cfspi_ifc *ifc);
+ void *priv;
+};
+
+/* Structure implemented by SPI clients. */
+struct cfspi_dev {
+ int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev);
+ void (*sig_xfer) (bool xfer, struct cfspi_dev *dev);
+ struct cfspi_ifc *ifc;
+ char *name;
+ u32 clk_mhz;
+ void *priv;
+};
+
+/* Enumeration describing the CAIF SPI state. */
+enum cfspi_state {
+ CFSPI_STATE_WAITING = 0,
+ CFSPI_STATE_AWAKE,
+ CFSPI_STATE_FETCH_PKT,
+ CFSPI_STATE_GET_NEXT,
+ CFSPI_STATE_INIT_XFER,
+ CFSPI_STATE_WAIT_ACTIVE,
+ CFSPI_STATE_SIG_ACTIVE,
+ CFSPI_STATE_WAIT_XFER_DONE,
+ CFSPI_STATE_XFER_DONE,
+ CFSPI_STATE_WAIT_INACTIVE,
+ CFSPI_STATE_SIG_INACTIVE,
+ CFSPI_STATE_DELIVER_PKT,
+ CFSPI_STATE_MAX,
+};
+
+/* Structure implemented by SPI physical interfaces. */
+struct cfspi {
+ struct caif_dev_common cfdev;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct sk_buff_head qhead;
+ struct sk_buff_head chead;
+ u16 cmd;
+ u16 tx_cpck_len;
+ u16 tx_npck_len;
+ u16 rx_cpck_len;
+ u16 rx_npck_len;
+ struct cfspi_ifc ifc;
+ struct cfspi_xfer xfer;
+ struct cfspi_dev *dev;
+ unsigned long state;
+ struct work_struct work;
+ struct workqueue_struct *wq;
+ struct list_head list;
+ int flow_off_sent;
+ u32 qd_low_mark;
+ u32 qd_high_mark;
+ struct completion comp;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+ bool flow_stop;
+#ifdef CONFIG_DEBUG_FS
+ enum cfspi_state dbg_state;
+ u16 pcmd;
+ u16 tx_ppck_len;
+ u16 rx_ppck_len;
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_state;
+ struct dentry *dbgfs_frame;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+extern int spi_frm_align;
+extern int spi_up_head_align;
+extern int spi_up_tail_align;
+extern int spi_down_head_align;
+extern int spi_down_tail_align;
+extern struct platform_driver cfspi_spi_driver;
+
+void cfspi_dbg_state(struct cfspi *cfspi, int state);
+int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+int cfspi_xmitlen(struct cfspi *cfspi);
+int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
+int cfspi_spi_remove(struct platform_device *pdev);
+int cfspi_spi_probe(struct platform_device *pdev);
+void cfspi_xfer(struct work_struct *work);
+
+#endif /* CAIF_SPI_H_ */
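For orientation, a minimal sketch (not part of the patch) of how the framing and clock constants declared above can be combined to estimate the worst-case duration of a single transfer; the helper name is illustrative and it assumes <net/caif/caif_spi.h> is included.

/* Hypothetical helper, for illustration only: worst-case transfer time in
 * microseconds for a maximum-sized SPI frame plus the 16-bit word-length
 * field and the 32-bit command word, clocked at SPI_MASTER_CLK_MHZ.
 */
static inline u32 example_max_xfer_time_usec(void)
{
	return SPI_XFER_TIME_USEC(CAIF_MAX_SPI_FRAME + WL_SZ + SPI_CMD_SZ,
				  SPI_MASTER_CLK_MHZ);
}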
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 9fc2fc2..bd646fa 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -7,6 +7,7 @@
#ifndef CFCNFG_H_
#define CFCNFG_H_
#include <linux/spinlock.h>
+#include <linux/netdevice.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfctrl.h>
@@ -73,8 +74,8 @@ void cfcnfg_remove(struct cfcnfg *cfg);
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
- void *dev, struct cflayer *phy_layer, u16 *phyid,
- enum cfcnfg_phy_preference pref,
+ struct net_device *dev, struct cflayer *phy_layer,
+ u16 *phyid, enum cfcnfg_phy_preference pref,
bool fcs, bool stx);
/**
@@ -114,11 +115,18 @@ void cfcnfg_release_adap_layer(struct cflayer *adap_layer);
* @param: Link setup parameters.
* @adap_layer: Specify the adaptation layer; the receive and
* flow-control functions MUST be set in the structure.
- *
+ * @ifindex: Link layer interface index used for this connection.
+ * @proto_head: Protocol head-space needed by CAIF protocol,
+ * excluding link layer.
+ * @proto_tail: Protocol tail-space needed by CAIF protocol,
+ * excluding link layer.
*/
int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
struct cfctrl_link_param *param,
- struct cflayer *adap_layer);
+ struct cflayer *adap_layer,
+ int *ifindex,
+ int *proto_head,
+ int *proto_tail);
/**
* cfcnfg_get_phyid() - Get physical ID, given type.
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index 2dc9eb1..b1fa87e 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -16,6 +16,8 @@ struct cfsrvl {
bool open;
bool phy_flow_on;
bool modem_flow_on;
+ bool supports_flowctrl;
+ void (*release)(struct kref *);
struct dev_info dev_info;
struct kref ref;
};
@@ -25,13 +27,15 @@ struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info);
-struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info,
+ int mtu_size);
struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
void cfservl_destroy(struct cflayer *layer);
void cfsrvl_init(struct cfsrvl *service,
- u8 channel_id,
- struct dev_info *dev_info);
+ u8 channel_id,
+ struct dev_info *dev_info,
+ bool supports_flowctrl);
bool cfsrvl_ready(struct cfsrvl *service, int *err);
u8 cfsrvl_getphyid(struct cflayer *layer);
@@ -50,7 +54,10 @@ static inline void cfsrvl_put(struct cflayer *layr)
if (layr == NULL)
return;
s = container_of(layr, struct cfsrvl, layer);
- kref_put(&s->ref, cfsrvl_release);
+
+ WARN_ON(!s->release);
+ if (s->release)
+ kref_put(&s->ref, s->release);
}
#endif /* CFSRVL_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a56bac1..168fe53 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -810,6 +810,7 @@ struct cfg80211_disassoc_request {
* @beacon_interval: beacon interval to use
* @privacy: this is a protected network, keys will be configured
* after joining
+ * @basic_rates: bitmap of basic rates to use when creating the IBSS
*/
struct cfg80211_ibss_params {
u8 *ssid;
@@ -818,6 +819,7 @@ struct cfg80211_ibss_params {
u8 *ie;
u8 ssid_len, ie_len;
u16 beacon_interval;
+ u32 basic_rates;
bool channel_fixed;
bool privacy;
};
@@ -873,19 +875,6 @@ enum wiphy_params_flags {
WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
};
-/**
- * enum tx_power_setting - TX power adjustment
- *
- * @TX_POWER_AUTOMATIC: the dbm parameter is ignored
- * @TX_POWER_LIMITED: limit TX power by the dbm parameter
- * @TX_POWER_FIXED: fix TX power to the dbm parameter
- */
-enum tx_power_setting {
- TX_POWER_AUTOMATIC,
- TX_POWER_LIMITED,
- TX_POWER_FIXED,
-};
-
/*
* cfg80211_bitrate_mask - masks for bitrate control
*/
@@ -1147,7 +1136,7 @@ struct cfg80211_ops {
int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
int (*set_tx_power)(struct wiphy *wiphy,
- enum tx_power_setting type, int dbm);
+ enum nl80211_tx_power_setting type, int mbm);
int (*get_tx_power)(struct wiphy *wiphy, int *dbm);
int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev,
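The set_tx_power callback now receives its argument in mBm (1/100 dBm) instead of dBm, matching nl80211's units; a hedged conversion sketch follows, with an illustrative helper name.

/* Illustrative only: drivers that still program the radio in whole dBm can
 * round the mBm value down; nl80211 expresses TX power in 1/100 dBm units.
 */
static inline int example_mbm_to_dbm(int mbm)
{
	return mbm / 100;
}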
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 39f2dc9..16ff29a 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -20,6 +20,7 @@ struct inet_frag_queue {
atomic_t refcnt;
struct timer_list timer; /* when will this queue expire? */
struct sk_buff *fragments; /* list of received fragments */
+ struct sk_buff *fragments_tail;
ktime_t stamp;
int len; /* total length of orig datagram */
int meat;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1653de5..1989cfd 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -137,7 +137,8 @@ struct inet_sock {
hdrincl:1,
mc_loop:1,
transparent:1,
- mc_all:1;
+ mc_all:1,
+ nodefrag:1;
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist *mc_list;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 87b1df0..417d0c8 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -22,10 +22,21 @@ struct inet_peer {
__u32 dtime; /* the time of last use of not
* referenced entries */
atomic_t refcnt;
- atomic_t rid; /* Frag reception counter */
- atomic_t ip_id_count; /* IP ID for the next packet */
- __u32 tcp_ts;
- __u32 tcp_ts_stamp;
+ /*
+ * Once inet_peer is queued for deletion (refcnt == -1), the following fields
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp.
+ * We can share memory with rcu_head to keep inet_peer small
+ * (less than 64 bytes)
+ */
+ union {
+ struct {
+ atomic_t rid; /* Frag reception counter */
+ atomic_t ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ };
+ struct rcu_head rcu;
+ };
};
void inet_initpeers(void) __init;
@@ -36,10 +47,21 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create);
/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
+/*
+ * temporary check to make sure we don't access rid, ip_id_count, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+ */
+static inline void inet_peer_refcheck(const struct inet_peer *p)
+{
+ WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
+}
+
/* can be called with or without local BH being disabled */
static inline __u16 inet_getid(struct inet_peer *p, int more)
{
more++;
+ inet_peer_refcheck(p);
return atomic_add_return(more, &p->ip_id_count) - more;
}
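A hedged usage sketch of the refcounted peer API above: inet_getpeer()/inet_putpeer() and inet_getid() are the existing interfaces, while the wrapper function and its parameters are illustrative only.

/* Illustrative only: take a reference on the peer for daddr, draw the next
 * IP ID from it (the refcheck in inet_getid() is satisfied because we hold
 * a reference), then drop the reference again.
 */
static __u16 example_next_ip_id(__be32 daddr, int segs)
{
	struct inet_peer *peer = inet_getpeer(daddr, 1);
	__u16 id;

	if (!peer)
		return 0;
	/* returns the current id and advances ip_id_count by 'segs' */
	id = inet_getid(peer, segs - 1);
	inet_putpeer(peer);
	return id;
}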
diff --git a/include/net/ip.h b/include/net/ip.h
index d52f011..890f972 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -165,12 +165,12 @@ struct ipv4_config {
};
extern struct ipv4_config ipv4_config;
-#define IP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.ip_statistics, field)
-#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS_BH((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS_BH((net)->mib.ip_statistics, field, val)
+#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
+#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
+#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
+#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
@@ -178,7 +178,15 @@ extern struct ipv4_config ipv4_config;
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
-extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize);
+#if BITS_PER_LONG==32
+extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+#else
+static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
+{
+ return snmp_fold_field(mib, offt);
+}
+#endif
+extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
extern void snmp_mib_free(void __percpu *ptr[2]);
extern struct local_ports {
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f5808d5..1f84124 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -136,17 +136,17 @@ extern struct ctl_path net_ipv6_ctl_path[];
/* MIBs */
#define IP6_INC_STATS(net, idev,field) \
- _DEVINC(net, ipv6, , idev, field)
+ _DEVINC(net, ipv6, 64, idev, field)
#define IP6_INC_STATS_BH(net, idev,field) \
- _DEVINC(net, ipv6, _BH, idev, field)
+ _DEVINC(net, ipv6, 64_BH, idev, field)
#define IP6_ADD_STATS(net, idev,field,val) \
- _DEVADD(net, ipv6, , idev, field, val)
+ _DEVADD(net, ipv6, 64, idev, field, val)
#define IP6_ADD_STATS_BH(net, idev,field,val) \
- _DEVADD(net, ipv6, _BH, idev, field, val)
+ _DEVADD(net, ipv6, 64_BH, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev,field,val) \
- _DEVUPD(net, ipv6, , idev, field, val)
+ _DEVUPD(net, ipv6, 64, idev, field, val)
#define IP6_UPD_PO_STATS_BH(net, idev,field,val) \
- _DEVUPD(net, ipv6, _BH, idev, field, val)
+ _DEVUPD(net, ipv6, 64_BH, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field) \
_DEVINC(net, icmpv6, , idev, field)
#define ICMP6_INC_STATS_BH(net, idev, field) \
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index abb3b1a..7f256e2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -19,7 +19,6 @@
#include <linux/wireless.h>
#include <linux/device.h>
#include <linux/ieee80211.h>
-#include <linux/inetdevice.h>
#include <net/cfg80211.h>
/**
@@ -147,6 +146,7 @@ struct ieee80211_low_level_stats {
* enabled/disabled (beaconing modes)
* @BSS_CHANGED_CQM: Connection quality monitor config changed
* @BSS_CHANGED_IBSS: IBSS join status changed
+ * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed.
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -161,10 +161,18 @@ enum ieee80211_bss_change {
BSS_CHANGED_BEACON_ENABLED = 1<<9,
BSS_CHANGED_CQM = 1<<10,
BSS_CHANGED_IBSS = 1<<11,
+ BSS_CHANGED_ARP_FILTER = 1<<12,
/* when adding here, make sure to change ieee80211_reconfig */
};
+/*
+ * The maximum number of IPv4 addresses listed for ARP filtering. If the number
+ * of addresses for an interface increases beyond this value, hardware ARP
+ * filtering will be disabled.
+ */
+#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
+
/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
@@ -200,6 +208,15 @@ enum ieee80211_bss_change {
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
* implies disabled
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
+ * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
+ *	hardware may filter ARP queries targeted for addresses other than
+ *	those listed here. The driver must allow ARP queries targeted for
+ *	all addresses listed here to pass through. An empty list implies no
+ *	ARP queries need to pass.
+ * @arp_addr_cnt: Number of addresses currently on the list.
+ * @arp_filter_enabled: Enable ARP filtering - if enabled, the hardware may
+ *	filter ARP queries based on the @arp_addr_list; if disabled, the
+ *	hardware must not perform any ARP filtering. Note that the filter will
+ *	be enabled also in promiscuous mode.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -220,6 +237,9 @@ struct ieee80211_bss_conf {
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
enum nl80211_channel_type channel_type;
+ __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ u8 arp_addr_cnt;
+ bool arp_filter_enabled;
};
/**
@@ -675,9 +695,6 @@ enum ieee80211_smps_mode {
* @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the
* powersave documentation below. This variable is valid only when
* the CONF_PS flag is set.
- * @dynamic_ps_forced_timeout: The dynamic powersave timeout (in ms) configured
- * by cfg80211 (essentially, wext) If set, this value overrules the value
- * chosen by mac80211 based on ps qos network latency.
*
* @power_level: requested transmit power (in dBm)
*
@@ -697,7 +714,7 @@ enum ieee80211_smps_mode {
*/
struct ieee80211_conf {
u32 flags;
- int power_level, dynamic_ps_timeout, dynamic_ps_forced_timeout;
+ int power_level, dynamic_ps_timeout;
int max_sleep_period;
u16 listen_interval;
@@ -1254,6 +1271,15 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* dynamic PS feature in stack and will just keep %IEEE80211_CONF_PS
* enabled whenever user has enabled powersave.
*
+ * Some hardware need to toggle a single shared antenna between WLAN and
+ * Bluetooth to facilitate co-existence. These types of hardware set
+ * limitations on the use of host controlled dynamic powersave whenever there
+ * is simultaneous WLAN and Bluetooth traffic. For these types of hardware, the
+ * driver may request temporarily going into full power save, in order to
+ * enable toggling the antenna between BT and WLAN. If the driver requests
+ * disabling dynamic powersave, the @dynamic_ps_timeout value will be
+ * temporarily set to zero until the driver re-enables dynamic powersave.
+ *
* Driver informs U-APSD client support by enabling
* %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
 * uapsd parameter in conf_tx() operation. Hardware needs to send the QoS
@@ -1445,7 +1471,7 @@ enum ieee80211_filter_flags {
*
* Note that drivers MUST be able to deal with a TX aggregation
* session being stopped even before they OK'ed starting it by
- * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer
+ * calling ieee80211_start_tx_ba_cb_irqsafe, because the peer
* might receive the addBA frame and send a delBA right away!
*
* @IEEE80211_AMPDU_RX_START: start Rx aggregation
@@ -1529,16 +1555,6 @@ enum ieee80211_ampdu_mlme_action {
* of the bss parameters has changed when a call is made. The callback
* can sleep.
*
- * @configure_arp_filter: Configuration function for hardware ARP query filter.
- * This function is called with all the IP addresses configured to the
- * interface as argument - all ARP queries targeted to any of these
- * addresses must pass through. If the hardware filter does not support
- * enought addresses, hardware filtering must be disabled. The ifa_list
- * argument may be NULL, indicating that filtering must be disabled.
- * This function is called upon association complete with current
- * address(es), and while associated whenever the IP address(es) change.
- * The callback can sleep.
- *
* @prepare_multicast: Prepare for multicast filter configuration.
* This callback is optional, and its return value is passed
* to configure_filter(). This callback must be atomic.
@@ -1640,7 +1656,7 @@ enum ieee80211_ampdu_mlme_action {
* is the first frame we expect to perform the action on. Notice
* that TX/RX_STOP can pass NULL for this parameter.
* Returns a negative error code on failure.
- * The callback must be atomic.
+ * The callback can sleep.
*
* @get_survey: Return per-channel survey information
*
@@ -1678,9 +1694,6 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u32 changed);
- int (*configure_arp_filter)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct in_ifaddr *ifa_list);
u64 (*prepare_multicast)(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
void (*configure_filter)(struct ieee80211_hw *hw,
@@ -2314,25 +2327,14 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
/**
- * ieee80211_start_tx_ba_cb - low level driver ready to aggregate.
- * @vif: &struct ieee80211_vif pointer from the add_interface callback
- * @ra: receiver address of the BA session recipient.
- * @tid: the TID to BA on.
- *
- * This function must be called by low level driver once it has
- * finished with preparations for the BA session.
- */
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-
-/**
* ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
* @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the TID to BA on.
*
* This function must be called by low level driver once it has
- * finished with preparations for the BA session.
- * This version of the function is IRQ-safe.
+ * finished with preparations for the BA session. It can be called
+ * from any context.
*/
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
u16 tid);
@@ -2351,25 +2353,14 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
/**
- * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate.
- * @vif: &struct ieee80211_vif pointer from the add_interface callback
- * @ra: receiver address of the BA session recipient.
- * @tid: the desired TID to BA on.
- *
- * This function must be called by low level driver once it has
- * finished with preparations for the BA session tear down.
- */
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
-
-/**
* ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate.
* @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the desired TID to BA on.
*
* This function must be called by low level driver once it has
- * finished with preparations for the BA session tear down.
- * This version of the function is IRQ-safe.
+ * finished with preparations for the BA session tear down. It
+ * can be called from any context.
*/
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
u16 tid);
@@ -2465,6 +2456,36 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
void ieee80211_connection_loss(struct ieee80211_vif *vif);
/**
+ * ieee80211_disable_dyn_ps - force mac80211 to temporarily disable dynamic psm
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Some hardware requires full power save to manage simultaneous BT traffic
+ * on the WLAN frequency. Full PSM is required periodically, whenever there are
+ * bursts of BT traffic. The hardware is informed of BT traffic via
+ * hardware co-existence lines, and consequently requests mac80211 to
+ * (temporarily) enter full PSM.
+ * This function will only temporarily disable dynamic PS, not enable PSM if
+ * it was not already enabled.
+ * The driver must make sure to re-enable dynamic PS using
+ * ieee80211_enable_dyn_ps() if the driver has disabled it.
+ *
+ */
+void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_enable_dyn_ps - restore dynamic psm after being disabled
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * This function restores dynamic PS after being temporarily disabled via
+ * ieee80211_disable_dyn_ps(). Each ieee80211_disable_dyn_ps() call must
+ * be coupled with an eventual call to this function.
+ *
+ */
+void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif);
+
+/**
* ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring
* rssi threshold triggered
*
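A hedged sketch of the driver side of the new ARP filtering fields: only the BSS_CHANGED_ARP_FILTER flag and the ieee80211_bss_conf members come from this patch; the example_hw_*() calls are placeholders for whatever mechanism a driver uses to program its firmware.

static void example_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *info,
				     u32 changed)
{
	if (!(changed & BSS_CHANGED_ARP_FILTER))
		return;

	if (!info->arp_filter_enabled) {
		example_hw_disable_arp_filter(hw);	/* hypothetical hook */
		return;
	}

	/* at most IEEE80211_BSS_ARP_ADDR_LIST_LEN addresses are listed */
	example_hw_set_arp_addrs(hw, info->arp_addr_list,	/* hypothetical */
				 info->arp_addr_cnt);
}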
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index eb21340..242879b 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -151,7 +151,7 @@ struct neigh_table {
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
- /* HACK. gc_* shoul follow parms without a gap! */
+ /* HACK. gc_* should follow parms without a gap! */
int gc_interval;
int gc_thresh1;
int gc_thresh2;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 4fc05b5..f3b201d 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -35,7 +35,7 @@
* nlmsg_new() create a new netlink message
* nlmsg_put() add a netlink message to an skb
* nlmsg_put_answer() callback based nlmsg_put()
- * nlmsg_end() finanlize netlink message
+ * nlmsg_end() finalize netlink message
* nlmsg_get_pos() return current position in message
* nlmsg_trim() trim part of message
* nlmsg_cancel() cancel message construction
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index b35301b..977ec06 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -594,9 +594,16 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
}
#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
+static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
+ int action)
{
- struct sk_buff *n = skb_clone(skb, gfp_mask);
+ struct sk_buff *n;
+
+ if ((action == TC_ACT_STOLEN || action == TC_ACT_QUEUED) &&
+ !skb_shared(skb))
+ n = skb_get(skb);
+ else
+ n = skb_clone(skb, gfp_mask);
if (n) {
n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
diff --git a/include/net/scm.h b/include/net/scm.h
index 8360e47..3165650 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -19,8 +19,10 @@ struct scm_fp_list {
};
struct scm_cookie {
- struct ucred creds; /* Skb credentials */
+ struct pid *pid; /* Skb credentials */
+ const struct cred *cred;
struct scm_fp_list *fp; /* Passed files */
+ struct ucred creds; /* Skb credentials */
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Passed security ID */
#endif
@@ -42,8 +44,27 @@ static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_co
{ }
#endif /* CONFIG_SECURITY_NETWORK */
+static __inline__ void scm_set_cred(struct scm_cookie *scm,
+ struct pid *pid, const struct cred *cred)
+{
+ scm->pid = get_pid(pid);
+ scm->cred = get_cred(cred);
+ cred_to_ucred(pid, cred, &scm->creds);
+}
+
+static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
+{
+ put_pid(scm->pid);
+ scm->pid = NULL;
+
+ if (scm->cred)
+ put_cred(scm->cred);
+ scm->cred = NULL;
+}
+
static __inline__ void scm_destroy(struct scm_cookie *scm)
{
+ scm_destroy_cred(scm);
if (scm && scm->fp)
__scm_destroy(scm);
}
@@ -51,10 +72,7 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm)
{
- struct task_struct *p = current;
- scm->creds.uid = current_uid();
- scm->creds.gid = current_gid();
- scm->creds.pid = task_tgid_vnr(p);
+ scm_set_cred(scm, task_tgid(current), current_cred());
scm->fp = NULL;
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
@@ -96,6 +114,8 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
if (test_bit(SOCK_PASSCRED, &sock->flags))
put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
+ scm_destroy_cred(scm);
+
scm_passec(sock, msg, scm);
if (!scm->fp)
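A hedged sketch of the intended lifetime of the new credential references: scm_send() pins a struct pid and struct cred via scm_set_cred(), and scm_destroy()/scm_recv() release them via scm_destroy_cred(); the wrapper function below is illustrative only.

/* Illustrative only: every cookie that went through scm_send() must reach
 * scm_destroy() (or scm_recv()), otherwise the pid/cred references leak.
 */
static int example_sendmsg_with_creds(struct socket *sock, struct msghdr *msg)
{
	struct scm_cookie scm;
	int err;

	memset(&scm, 0, sizeof(scm));
	err = scm_send(sock, msg, &scm);	/* takes pid + cred references */
	if (err < 0)
		return err;
	/* ... copy scm into the skb control block and queue the skb ... */
	scm_destroy(&scm);			/* drops the references again */
	return 0;
}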
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 92456f1..a0e6180 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -47,15 +47,16 @@ struct snmp_mib {
}
/*
- * We use all unsigned longs. Linux will soon be so reliable that even
- * these will rapidly get too small 8-). Seriously consider the IpInReceives
- * count on the 20Gb/s + networks people expect in a few years time!
+ * We use unsigned longs for most mibs but u64 for ipstats.
*/
+#include <linux/u64_stats_sync.h>
/* IPstats */
#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
struct ipstats_mib {
- unsigned long mibs[IPSTATS_MIB_MAX];
+ /* mibs[] must be first field of struct ipstats_mib */
+ u64 mibs[IPSTATS_MIB_MAX];
+ struct u64_stats_sync syncp;
};
/* ICMP */
@@ -134,7 +135,7 @@ struct linux_xfrm_mib {
#define SNMP_ADD_STATS_USER(mib, field, addend) \
this_cpu_add(mib[1]->mibs[field], addend)
#define SNMP_ADD_STATS(mib, field, addend) \
- this_cpu_add(mib[0]->mibs[field], addend)
+ this_cpu_add(mib[!in_softirq()]->mibs[field], addend)
/*
* Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
* to make @ptr a non-percpu pointer.
@@ -155,4 +156,70 @@ struct linux_xfrm_mib {
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend;\
} while (0)
+
+
+#if BITS_PER_LONG==32
+
+#define SNMP_ADD_STATS64_BH(mib, field, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[field] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ } while (0)
+#define SNMP_ADD_STATS64_USER(mib, field, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr; \
+ preempt_disable(); \
+ ptr = __this_cpu_ptr((mib)[1]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[field] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ preempt_enable(); \
+ } while (0)
+#define SNMP_ADD_STATS64(mib, field, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr; \
+ preempt_disable(); \
+ ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[field] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ preempt_enable(); \
+ } while (0)
+#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
+#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr; \
+ preempt_disable(); \
+ ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[basefield##PKTS]++; \
+ ptr->mibs[basefield##OCTETS] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ preempt_enable(); \
+ } while (0)
+#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
+ do { \
+ __typeof__(*mib[0]) *ptr; \
+ ptr = __this_cpu_ptr((mib)[!in_softirq()]); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->mibs[basefield##PKTS]++; \
+ ptr->mibs[basefield##OCTETS] += addend; \
+ u64_stats_update_end(&ptr->syncp); \
+ } while (0)
+#else
+#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field)
+#define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field)
+#define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field)
+#define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field)
+#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
+#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
+#define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend)
+#define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
+#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#endif
+
#endif
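The SNMP64 macros above only cover the writer side. Below is a hedged sketch of the matching reader on a 32-bit kernel, folding one field across CPUs under the syncp seqcount; this is the same pattern the snmp_fold_field64() declaration in include/net/ip.h is meant to implement, and for brevity the function only folds a single per-cpu copy.

static u64 example_fold_ipstats(struct ipstats_mib __percpu *mib, int field)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct ipstats_mib *p = per_cpu_ptr(mib, cpu);
		unsigned int start;
		u64 v;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			v = p->mibs[field];
		} while (u64_stats_fetch_retry(&p->syncp, start));
		sum += v;
	}
	return sum;
}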
diff --git a/include/net/sock.h b/include/net/sock.h
index f8acf38..4f26f2f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -295,7 +295,8 @@ struct sock {
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
- struct ucred sk_peercred;
+ struct pid *sk_peer_pid;
+ const struct cred *sk_peer_cred;
long sk_rcvtimeo;
long sk_sndtimeo;
struct sk_filter *sk_filter;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5731664..c2f96c2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -464,7 +464,7 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
__u16 *mss);
extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);
+extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
@@ -602,6 +602,17 @@ extern u32 __tcp_select_window(struct sock *sk);
*/
#define tcp_time_stamp ((__u32)(jiffies))
+#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
+
+#define TCPHDR_FIN 0x01
+#define TCPHDR_SYN 0x02
+#define TCPHDR_RST 0x04
+#define TCPHDR_PSH 0x08
+#define TCPHDR_ACK 0x10
+#define TCPHDR_URG 0x20
+#define TCPHDR_ECE 0x40
+#define TCPHDR_CWR 0x80
+
/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission
* code. We also store the host-order sequence numbers in
@@ -620,19 +631,6 @@ struct tcp_skb_cb {
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
__u32 when; /* used to compute rtt's */
__u8 flags; /* TCP header flags. */
-
- /* NOTE: These must match up to the flags byte in a
- * real TCP header.
- */
-#define TCPCB_FLAG_FIN 0x01
-#define TCPCB_FLAG_SYN 0x02
-#define TCPCB_FLAG_RST 0x04
-#define TCPCB_FLAG_PSH 0x08
-#define TCPCB_FLAG_ACK 0x10
-#define TCPCB_FLAG_URG 0x20
-#define TCPCB_FLAG_ECE 0x40
-#define TCPCB_FLAG_CWR 0x80
-
__u8 sacked; /* State flags for SACK/FACK. */
#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
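A hedged example of how the relocated TCPHDR_* values line up with the on-the-wire flags byte exposed by tcp_flag_byte(); the helper itself is illustrative only.

/* Illustrative only: byte 13 of the TCP header holds the flag bits, so a
 * pure SYN (connection request, no ACK) can be tested like this.
 */
static inline bool example_is_pure_syn(struct tcphdr *th)
{
	return (tcp_flag_byte(th) & (TCPHDR_SYN | TCPHDR_ACK)) == TCPHDR_SYN;
}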
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index b2d70d3..2591583 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -9,6 +9,7 @@
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
+#include <linux/highuid.h>
#include <linux/cred.h>
/*
@@ -82,3 +83,46 @@ void free_user_ns(struct kref *kref)
schedule_work(&ns->destroyer);
}
EXPORT_SYMBOL(free_user_ns);
+
+uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid)
+{
+ struct user_namespace *tmp;
+
+ if (likely(to == cred->user->user_ns))
+ return uid;
+
+ /* Is cred->user the creator of the target user_ns
+ * or the creator of one of its parents?
+ */
+ for ( tmp = to; tmp != &init_user_ns;
+ tmp = tmp->creator->user_ns ) {
+ if (cred->user == tmp->creator) {
+ return (uid_t)0;
+ }
+ }
+
+ /* No useful relationship so no mapping */
+ return overflowuid;
+}
+
+gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid)
+{
+ struct user_namespace *tmp;
+
+ if (likely(to == cred->user->user_ns))
+ return gid;
+
+ /* Is cred->user the creator of the target user_ns
+ * or the creator of one of its parents?
+ */
+ for ( tmp = to; tmp != &init_user_ns;
+ tmp = tmp->creator->user_ns ) {
+ if (cred->user == tmp->creator) {
+ return (gid_t)0;
+ }
+ }
+
+ /* No useful relationship so no mapping */
+ return overflowgid;
+}
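For clarity, a hedged summary of the mapping rules the two functions above implement, written out as a worked example; the 65534 value assumes the usual DEFAULT_OVERFLOWUID from <linux/highuid.h>.

/*
 * user_ns_map_uid(to, cred, uid) is expected to behave as follows:
 *
 *   to == cred->user->user_ns                 -> uid         (identity map)
 *   cred->user created 'to' or an ancestor    -> 0           (seen as root)
 *   no relationship between the namespaces    -> overflowuid (65534 by default)
 *
 * user_ns_map_gid() mirrors this with gid/overflowgid.
 */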
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 6abdcac..8d9503a 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -2,6 +2,7 @@
#define __BEN_VLAN_802_1Q_INC__
#include <linux/if_vlan.h>
+#include <linux/u64_stats_sync.h>
/**
@@ -21,14 +22,16 @@ struct vlan_priority_tci_mapping {
* struct vlan_rx_stats - VLAN percpu rx stats
* @rx_packets: number of received packets
* @rx_bytes: number of received bytes
- * @multicast: number of received multicast packets
+ * @rx_multicast: number of received multicast packets
+ * @syncp: synchronization point for 64bit counters
* @rx_errors: number of errors
*/
struct vlan_rx_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long multicast;
- unsigned long rx_errors;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_multicast;
+ struct u64_stats_sync syncp;
+ unsigned long rx_errors;
};
/**
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 50f58f5..1b9406a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -41,9 +41,9 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
skb->vlan_tci = 0;
- rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
- smp_processor_id());
+ rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);
+ u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
@@ -51,7 +51,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
case PACKET_BROADCAST:
break;
case PACKET_MULTICAST:
- rx_stats->multicast++;
+ rx_stats->rx_multicast++;
break;
case PACKET_OTHERHOST:
/* Our lower layer thinks this is not local, let's make sure.
@@ -62,6 +62,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
break;
}
+ u64_stats_update_end(&rx_stats->syncp);
return 0;
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 5298426..c6456cb 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -166,6 +166,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
smp_processor_id());
+ u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
@@ -182,7 +183,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
break;
case PACKET_MULTICAST:
- rx_stats->multicast++;
+ rx_stats->rx_multicast++;
break;
case PACKET_OTHERHOST:
@@ -197,6 +198,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
default:
break;
}
+ u64_stats_update_end(&rx_stats->syncp);
vlan_set_encap_proto(skb, vhdr);
@@ -801,27 +803,37 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
return dev_ethtool_get_flags(vlan->real_dev);
}
-static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev)
{
- struct net_device_stats *stats = &dev->stats;
+ struct rtnl_link_stats64 *stats = &dev->stats64;
- dev_txq_stats_fold(dev, stats);
+ dev_txq_stats_fold(dev, &dev->stats);
if (vlan_dev_info(dev)->vlan_rx_stats) {
- struct vlan_rx_stats *p, rx = {0};
+ struct vlan_rx_stats *p, accum = {0};
int i;
for_each_possible_cpu(i) {
+ u64 rxpackets, rxbytes, rxmulticast;
+ unsigned int start;
+
p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
- rx.rx_packets += p->rx_packets;
- rx.rx_bytes += p->rx_bytes;
- rx.rx_errors += p->rx_errors;
- rx.multicast += p->multicast;
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ rxpackets = p->rx_packets;
+ rxbytes = p->rx_bytes;
+ rxmulticast = p->rx_multicast;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ accum.rx_packets += rxpackets;
+ accum.rx_bytes += rxbytes;
+ accum.rx_multicast += rxmulticast;
+ /* rx_errors is a ulong, not protected by syncp */
+ accum.rx_errors += p->rx_errors;
}
- stats->rx_packets = rx.rx_packets;
- stats->rx_bytes = rx.rx_bytes;
- stats->rx_errors = rx.rx_errors;
- stats->multicast = rx.multicast;
+ stats->rx_packets = accum.rx_packets;
+ stats->rx_bytes = accum.rx_bytes;
+ stats->rx_errors = accum.rx_errors;
+ stats->multicast = accum.rx_multicast;
}
return stats;
}
@@ -848,7 +860,7 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
- .ndo_get_stats = vlan_dev_get_stats,
+ .ndo_get_stats64 = vlan_dev_get_stats64,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
@@ -872,7 +884,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
- .ndo_get_stats = vlan_dev_get_stats,
+ .ndo_get_stats64 = vlan_dev_get_stats64,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
@@ -897,7 +909,7 @@ static const struct net_device_ops vlan_netdev_ops_sq = {
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
- .ndo_get_stats = vlan_dev_get_stats,
+ .ndo_get_stats64 = vlan_dev_get_stats64,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
@@ -922,7 +934,7 @@ static const struct net_device_ops vlan_netdev_accel_ops_sq = {
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
- .ndo_get_stats = vlan_dev_get_stats,
+ .ndo_get_stats64 = vlan_dev_get_stats64,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
diff --git a/net/Makefile b/net/Makefile
index cb7bdc1..41d4200 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -50,7 +50,7 @@ endif
obj-$(CONFIG_IP_DCCP) += dccp/
obj-$(CONFIG_IP_SCTP) += sctp/
obj-$(CONFIG_RDS) += rds/
-obj-y += wireless/
+obj-$(CONFIG_WIRELESS) += wireless/
obj-$(CONFIG_MAC80211) += mac80211/
obj-$(CONFIG_TIPC) += tipc/
obj-$(CONFIG_NETLABEL) += netlabel/
@@ -61,7 +61,7 @@ obj-$(CONFIG_CAIF) += caif/
ifneq ($(CONFIG_DCB),)
obj-y += dcb/
endif
-obj-y += ieee802154/
+obj-$(CONFIG_IEEE802154) += ieee802154/
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index b898364..edf639e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -38,8 +38,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
+ u64_stats_update_begin(&brstats->syncp);
brstats->tx_packets++;
brstats->tx_bytes += skb->len;
+ u64_stats_update_end(&brstats->syncp);
BR_INPUT_SKB_CB(skb)->brdev = dev;
@@ -47,6 +49,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb_pull(skb, ETH_HLEN);
if (is_multicast_ether_addr(dest)) {
+ if (unlikely(netpoll_tx_running(dev))) {
+ br_flood_deliver(br, skb);
+ goto out;
+ }
if (br_multicast_rcv(br, NULL, skb))
goto out;
@@ -92,21 +98,25 @@ static int br_dev_stop(struct net_device *dev)
return 0;
}
-static struct net_device_stats *br_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct br_cpu_netstats sum = { 0 };
+ struct rtnl_link_stats64 *stats = &dev->stats64;
+ struct br_cpu_netstats tmp, sum = { 0 };
unsigned int cpu;
for_each_possible_cpu(cpu) {
+ unsigned int start;
const struct br_cpu_netstats *bstats
= per_cpu_ptr(br->stats, cpu);
-
- sum.tx_bytes += bstats->tx_bytes;
- sum.tx_packets += bstats->tx_packets;
- sum.rx_bytes += bstats->rx_bytes;
- sum.rx_packets += bstats->rx_packets;
+ do {
+ start = u64_stats_fetch_begin(&bstats->syncp);
+ memcpy(&tmp, bstats, sizeof(tmp));
+ } while (u64_stats_fetch_retry(&bstats->syncp, start));
+ sum.tx_bytes += tmp.tx_bytes;
+ sum.tx_packets += tmp.tx_packets;
+ sum.rx_bytes += tmp.rx_bytes;
+ sum.rx_packets += tmp.rx_packets;
}
stats->tx_bytes = sum.tx_bytes;
@@ -199,73 +209,81 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static bool br_devices_support_netpoll(struct net_bridge *br)
+static void br_poll_controller(struct net_device *br_dev)
{
- struct net_bridge_port *p;
- bool ret = true;
- int count = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&br->lock, flags);
- list_for_each_entry(p, &br->port_list, list) {
- count++;
- if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !p->dev->netdev_ops->ndo_poll_controller)
- ret = false;
- }
- spin_unlock_irqrestore(&br->lock, flags);
- return count != 0 && ret;
}
-static void br_poll_controller(struct net_device *br_dev)
+static void br_netpoll_cleanup(struct net_device *dev)
{
- struct netpoll *np = br_dev->npinfo->netpoll;
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_port *p, *n;
- if (np->real_dev != br_dev)
- netpoll_poll_dev(np->real_dev);
+ list_for_each_entry_safe(p, n, &br->port_list, list) {
+ br_netpoll_disable(p);
+ }
}
-void br_netpoll_cleanup(struct net_device *dev)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p, *n;
- const struct net_device_ops *ops;
+ int err = 0;
- br->dev->npinfo = NULL;
list_for_each_entry_safe(p, n, &br->port_list, list) {
- if (p->dev) {
- ops = p->dev->netdev_ops;
- if (ops->ndo_netpoll_cleanup)
- ops->ndo_netpoll_cleanup(p->dev);
- else
- p->dev->npinfo = NULL;
- }
+ if (!p->dev)
+ continue;
+
+ err = br_netpoll_enable(p);
+ if (err)
+ goto fail;
}
+
+out:
+ return err;
+
+fail:
+ br_netpoll_cleanup(dev);
+ goto out;
}
-void br_netpoll_disable(struct net_bridge *br,
- struct net_device *dev)
+int br_netpoll_enable(struct net_bridge_port *p)
{
- if (br_devices_support_netpoll(br))
- br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- if (dev->netdev_ops->ndo_netpoll_cleanup)
- dev->netdev_ops->ndo_netpoll_cleanup(dev);
- else
- dev->npinfo = NULL;
+ struct netpoll *np;
+ int err = 0;
+
+ np = kzalloc(sizeof(*p->np), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!np)
+ goto out;
+
+ np->dev = p->dev;
+
+ err = __netpoll_setup(np);
+ if (err) {
+ kfree(np);
+ goto out;
+ }
+
+ p->np = np;
+
+out:
+ return err;
}
-void br_netpoll_enable(struct net_bridge *br,
- struct net_device *dev)
+void br_netpoll_disable(struct net_bridge_port *p)
{
- if (br_devices_support_netpoll(br)) {
- br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- if (br->dev->npinfo)
- dev->npinfo = br->dev->npinfo;
- } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
- br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
- br_info(br,"new device %s does not support netpoll (disabling)",
- dev->name);
- }
+ struct netpoll *np = p->np;
+
+ if (!np)
+ return;
+
+ p->np = NULL;
+
+ /* Wait for transmitting packets to finish before freeing. */
+ synchronize_rcu_bh();
+
+ __netpoll_cleanup(np);
+ kfree(np);
}
#endif
@@ -288,12 +306,13 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_open = br_dev_open,
.ndo_stop = br_dev_stop,
.ndo_start_xmit = br_dev_xmit,
- .ndo_get_stats = br_get_stats,
+ .ndo_get_stats64 = br_get_stats64,
.ndo_set_mac_address = br_set_mac_address,
.ndo_set_multicast_list = br_dev_set_multicast_list,
.ndo_change_mtu = br_change_mtu,
.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_setup = br_netpoll_setup,
.ndo_netpoll_cleanup = br_netpoll_cleanup,
.ndo_poll_controller = br_poll_controller,
#endif
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 2663743..a744296 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -128,7 +128,7 @@ void br_fdb_cleanup(unsigned long _data)
{
struct net_bridge *br = (struct net_bridge *)_data;
unsigned long delay = hold_time(br);
- unsigned long next_timer = jiffies + br->forward_delay;
+ unsigned long next_timer = jiffies + br->ageing_time;
int i;
spin_lock_bh(&br->hash_lock);
@@ -149,9 +149,7 @@ void br_fdb_cleanup(unsigned long _data)
}
spin_unlock_bh(&br->hash_lock);
- /* Add HZ/4 to ensure we round the jiffies upwards to be after the next
- * timer, otherwise we might round down and will have no-op run. */
- mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4));
+ mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
}
/* Completely flush all dynamic entries in forwarding database.*/
@@ -242,11 +240,11 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
struct net_bridge_fdb_entry *fdb;
int ret;
- if (!dev->br_port)
+ if (!br_port_exists(dev))
return 0;
rcu_read_lock();
- fdb = __br_fdb_get(dev->br_port->br, addr);
+ fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr);
ret = fdb && fdb->dst->dev != dev &&
fdb->dst->state == BR_STATE_FORWARDING;
rcu_read_unlock();
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a98ef13..cbfe87f 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
kfree_skb(skb);
else {
skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
- netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
- skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
- } else
-#endif
- dev_queue_xmit(skb);
+ dev_queue_xmit(skb);
}
}
@@ -73,23 +66,20 @@ int br_forward_finish(struct sk_buff *skb)
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
-#ifdef CONFIG_NET_POLL_CONTROLLER
- struct net_bridge *br = to->br;
- if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
- struct netpoll *np;
- to->dev->npinfo = skb->dev->npinfo;
- np = skb->dev->npinfo->netpoll;
- np->real_dev = np->dev = to->dev;
- to->dev->priv_flags |= IFF_IN_NETPOLL;
- }
-#endif
skb->dev = to->dev;
+
+ if (unlikely(netpoll_tx_running(to->dev))) {
+ if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
+ kfree_skb(skb);
+ else {
+ skb_push(skb, ETH_HLEN);
+ br_netpoll_send_skb(to, skb);
+ }
+ return;
+ }
+
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (skb->dev->npinfo)
- skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
}
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -140,10 +130,10 @@ static int deliver_clone(const struct net_bridge_port *prev,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
+ struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb) {
- struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
-
dev->stats.tx_dropped++;
return -ENOMEM;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index d924234..c03d2c3 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -147,15 +147,17 @@ static void del_nbp(struct net_bridge_port *p)
list_del_rcu(&p->list);
+ dev->priv_flags &= ~IFF_BRIDGE_PORT;
+
netdev_rx_handler_unregister(dev);
- rcu_assign_pointer(dev->br_port, NULL);
br_multicast_del_port(p);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
- br_netpoll_disable(br, dev);
+ br_netpoll_disable(p);
+
call_rcu(&p->rcu, destroy_nbp_rcu);
}
@@ -168,8 +170,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
del_nbp(p);
}
- br_netpoll_cleanup(br->dev);
-
del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev);
@@ -401,7 +401,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
return -ELOOP;
/* Device is already being bridged */
- if (dev->br_port != NULL)
+ if (br_port_exists(dev))
return -EBUSY;
/* No bridging devices that dislike that (e.g. wireless) */
@@ -429,12 +429,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err2;
- rcu_assign_pointer(dev->br_port, p);
+ if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
+ goto err3;
- err = netdev_rx_handler_register(dev, br_handle_frame);
+ err = netdev_rx_handler_register(dev, br_handle_frame, p);
if (err)
goto err3;
+ dev->priv_flags |= IFF_BRIDGE_PORT;
+
dev_disable_lro(dev);
list_add_rcu(&p->list, &br->port_list);
@@ -454,11 +457,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
kobject_uevent(&p->kobj, KOBJ_ADD);
- br_netpoll_enable(br, dev);
-
return 0;
err3:
- rcu_assign_pointer(dev->br_port, NULL);
+ sysfs_remove_link(br->ifobj, p->dev->name);
err2:
br_fdb_delete_by_port(br, p, 1);
err1:
@@ -475,9 +476,13 @@ put_back:
/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
- struct net_bridge_port *p = dev->br_port;
+ struct net_bridge_port *p;
+
+ if (!br_port_exists(dev))
+ return -EINVAL;
- if (!p || p->br != br)
+ p = br_port_get(dev);
+ if (p->br != br)
return -EINVAL;
del_nbp(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 99647d8..5fc1c5b 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -27,8 +27,10 @@ static int br_pass_frame_up(struct sk_buff *skb)
struct net_bridge *br = netdev_priv(brdev);
struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
+ u64_stats_update_begin(&brstats->syncp);
brstats->rx_packets++;
brstats->rx_bytes += skb->len;
+ u64_stats_update_end(&brstats->syncp);
indev = skb->dev;
skb->dev = brdev;
@@ -41,7 +43,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
int br_handle_frame_finish(struct sk_buff *skb)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
- struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
struct net_bridge *br;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
@@ -111,10 +113,9 @@ drop:
/* note: already called with rcu_read_lock (preempt_disabled) */
static int br_handle_local_finish(struct sk_buff *skb)
{
- struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
- if (p)
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
return 0; /* process further */
}
@@ -151,7 +152,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
if (!skb)
return NULL;
- p = rcu_dereference(skb->dev->br_port);
+ p = br_port_get_rcu(skb->dev);
if (unlikely(is_link_local(dest))) {
/* Pause frames shouldn't be passed up by driver anyway */
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f1d49ae..9fdf1b1 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -130,16 +130,17 @@ void br_netfilter_rtable_init(struct net_bridge *br)
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
- struct net_bridge_port *port = rcu_dereference(dev->br_port);
-
- return port ? &port->br->fake_rtable : NULL;
+ if (!br_port_exists(dev))
+ return NULL;
+ return &br_port_get_rcu(dev)->br->fake_rtable;
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
- struct net_bridge_port *port = rcu_dereference(dev->br_port);
+ if (!br_port_exists(dev))
+ return NULL;
- return port ? port->br->dev : NULL;
+ return br_port_get_rcu(dev)->br->dev;
}
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -554,7 +555,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, len)))
goto out;
- p = rcu_dereference(in->br_port);
+ p = br_port_get_rcu(in);
if (p == NULL)
goto out;
br = p->br;
@@ -726,7 +727,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
struct net_bridge *br;
struct net_device **d = (struct net_device **)(skb->cb);
- p = rcu_dereference(out->br_port);
+ p = br_port_get_rcu(out);
if (p == NULL)
return NF_ACCEPT;
br = p->br;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index fe0a790..4a6a378 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -120,10 +120,11 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
for_each_netdev(net, dev) {
/* not a bridge port */
- if (dev->br_port == NULL || idx < cb->args[0])
+ if (!br_port_exists(dev) || idx < cb->args[0])
goto skip;
- if (br_fill_ifinfo(skb, dev->br_port, NETLINK_CB(cb->skb).pid,
+ if (br_fill_ifinfo(skb, br_port_get(dev),
+ NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWLINK,
NLM_F_MULTI) < 0)
break;
@@ -168,9 +169,9 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
if (!dev)
return -ENODEV;
- p = dev->br_port;
- if (!p)
+ if (!br_port_exists(dev))
return -EINVAL;
+ p = br_port_get(dev);
/* if kernel STP is running, don't allow changes */
if (p->br->stp_enabled == BR_KERNEL_STP)
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 717e1fd..404d4e1 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -32,14 +32,15 @@ struct notifier_block br_device_notifier = {
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
- struct net_bridge_port *p = dev->br_port;
+ struct net_bridge_port *p = br_port_get(dev);
struct net_bridge *br;
int err;
/* not a port of a bridge */
- if (p == NULL)
+ if (!br_port_exists(dev))
return NOTIFY_DONE;
+ p = br_port_get(dev);
br = p->br;
switch (event) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 7484065..75c90ed 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -15,6 +15,8 @@
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
+#include <linux/netpoll.h>
+#include <linux/u64_stats_sync.h>
#include <net/route.h>
#define BR_HASH_BITS 8
@@ -143,13 +145,23 @@ struct net_bridge_port
#ifdef CONFIG_SYSFS
char sysfs_name[IFNAMSIZ];
#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+#endif
};
+#define br_port_get_rcu(dev) \
+ ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
+#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
+#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
+
struct br_cpu_netstats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long tx_packets;
- unsigned long tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
};
struct net_bridge
@@ -276,16 +288,41 @@ extern void br_dev_setup(struct net_device *dev);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void br_netpoll_cleanup(struct net_device *dev);
-extern void br_netpoll_enable(struct net_bridge *br,
- struct net_device *dev);
-extern void br_netpoll_disable(struct net_bridge *br,
- struct net_device *dev);
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+ return br->dev->npinfo;
+}
+
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+ struct sk_buff *skb)
+{
+ struct netpoll *np = p->np;
+
+ if (np)
+ netpoll_send_skb(np, skb);
+}
+
+extern int br_netpoll_enable(struct net_bridge_port *p);
+extern void br_netpoll_disable(struct net_bridge_port *p);
#else
-#define br_netpoll_cleanup(br)
-#define br_netpoll_enable(br, dev)
-#define br_netpoll_disable(br, dev)
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+ return NULL;
+}
+
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+ struct sk_buff *skb)
+{
+}
+static inline int br_netpoll_enable(struct net_bridge_port *p)
+{
+ return 0;
+}
+
+static inline void br_netpoll_disable(struct net_bridge_port *p)
+{
+}
#endif
/* br_fdb.c */
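
The dev->br_port pointer is gone: a bridge port is now found through the
IFF_BRIDGE_PORT flag and the rx_handler_data pointer, as the accessors above
show. A minimal sketch of the intended calling pattern (the helper name is
illustrative, not part of the patch):

    static void example_dump_port(struct net_device *dev)
    {
            struct net_bridge_port *p;

            rcu_read_lock();
            if (br_port_exists(dev)) {
                    /* br_port_get_rcu() is only valid under rcu_read_lock(),
                     * or from the device's rx_handler, which already runs
                     * inside one */
                    p = br_port_get_rcu(dev);
                    pr_debug("%s is a port of bridge %s\n",
                             dev->name, p->br->dev->name);
            }
            rcu_read_unlock();
    }
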
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 217bd22..70aecb4 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -137,12 +137,13 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
- struct net_bridge_port *p = rcu_dereference(dev->br_port);
+ struct net_bridge_port *p;
struct net_bridge *br;
const unsigned char *buf;
- if (!p)
+ if (!br_port_exists(dev))
goto err;
+ p = br_port_get_rcu(dev);
if (!pskb_may_pull(skb, 4))
goto err;
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 9e19166..46624bb 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -24,8 +24,9 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
return EBT_DROP;
if (par->hooknum != NF_BR_BROUTING)
+ /* rcu_read_lock()ed by nf_hook_slow */
memcpy(eth_hdr(skb)->h_dest,
- par->in->br_port->br->dev->dev_addr, ETH_ALEN);
+ br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN);
else
memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN);
skb->pkt_type = PACKET_HOST;
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ae3c7ce..26377e9 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -177,8 +177,9 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
if (in) {
strcpy(pm->physindev, in->name);
/* If in isn't a bridge, then physindev==indev */
- if (in->br_port)
- strcpy(pm->indev, in->br_port->br->dev->name);
+ if (br_port_exists(in))
+ /* rcu_read_lock()ed by nf_hook_slow */
+ strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
else
strcpy(pm->indev, in->name);
} else
@@ -187,7 +188,8 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
if (out) {
/* If out exists, then out is a bridge port */
strcpy(pm->physoutdev, out->name);
- strcpy(pm->outdev, out->br_port->br->dev->name);
+ /* rcu_read_lock()ed by nf_hook_slow */
+ strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
} else
pm->outdev[0] = pm->physoutdev[0] = '\0';
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 59ca00e..bcc102e 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -140,11 +140,14 @@ ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
return 1;
if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
return 1;
- if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
- e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
+ /* rcu_read_lock()ed by nf_hook_slow */
+ if (in && br_port_exists(in) &&
+ FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
+ EBT_ILOGICALIN))
return 1;
- if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
- e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
+ if (out && br_port_exists(out) &&
+ FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
+ EBT_ILOGICALOUT))
return 1;
if (e->bitmask & EBT_SOURCEMAC) {
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index ed65178..529750d 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -21,19 +21,18 @@ menuconfig CAIF
See Documentation/networking/caif for a further explanation on how to
use and configure CAIF.
-if CAIF
-
config CAIF_DEBUG
bool "Enable Debug"
+ depends on CAIF
default n
--- help ---
Enable the inclusion of debug code in the CAIF stack.
Be aware that doing this will impact performance.
If unsure say N.
-
config CAIF_NETDEV
tristate "CAIF GPRS Network device"
+ depends on CAIF
default CAIF
---help---
Say Y if you will be using a CAIF based GPRS network device.
@@ -41,5 +40,3 @@ config CAIF_NETDEV
If you select to build it as a built-in then the main CAIF device must
also be a built-in.
If unsure say Y.
-
-endif
diff --git a/net/caif/Makefile b/net/caif/Makefile
index 34852af..f87481f 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -1,23 +1,13 @@
-ifeq ($(CONFIG_CAIF_DEBUG),1)
-CAIF_DBG_FLAGS := -DDEBUG
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
endif
-ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
-
caif-objs := caif_dev.o \
cfcnfg.o cfmuxl.o cfctrl.o \
cffrml.o cfveil.o cfdbgl.o\
cfserl.o cfdgml.o \
cfrfml.o cfvidl.o cfutill.o \
cfsrvl.o cfpkt_skbuff.o caif_config_util.o
-clean-dirs:= .tmp_versions
-
-clean-files:= \
- Module.symvers \
- modules.order \
- *.cmd \
- *.o \
- *~
obj-$(CONFIG_CAIF) += caif.o
obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 6f36580..76ae683 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -80,6 +80,11 @@ int connect_req_to_link_param(struct cfcnfg *cnfg,
l->u.utility.paramlen);
break;
+ case CAIFPROTO_DEBUG:
+ l->linktype = CFCTRL_SRV_DBG;
+ l->endpoint = s->sockaddr.u.dbg.service;
+ l->chtype = s->sockaddr.u.dbg.type;
+ break;
default:
return -EINVAL;
}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e2b86f1..0b586e9 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -255,7 +255,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
pref = CFPHYPREF_HIGH_BW;
break;
}
-
+ dev_hold(dev);
cfcnfg_add_phy_layer(get_caif_conf(),
phy_type,
dev,
@@ -285,6 +285,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
caifd->layer.up->ctrlcmd(caifd->layer.up,
_CAIF_CTRLCMD_PHYIF_DOWN_IND,
caifd->layer.id);
+ might_sleep();
res = wait_event_interruptible_timeout(caifd->event,
atomic_read(&caifd->in_use) == 0,
TIMEOUT);
@@ -300,6 +301,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
"Unregistering an active CAIF device: %s\n",
__func__, dev->name);
cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+ dev_put(dev);
atomic_set(&caifd->state, what);
break;
@@ -326,7 +328,8 @@ struct cfcnfg *get_caif_conf(void)
EXPORT_SYMBOL(get_caif_conf);
int caif_connect_client(struct caif_connect_request *conn_req,
- struct cflayer *client_layer)
+ struct cflayer *client_layer, int *ifindex,
+ int *headroom, int *tailroom)
{
struct cfctrl_link_param param;
int ret;
@@ -334,8 +337,9 @@ int caif_connect_client(struct caif_connect_request *conn_req,
if (ret)
return ret;
/* Hook up the adaptation layer. */
- return cfcnfg_add_adaptation_layer(get_caif_conf(),
- &param, client_layer);
+ return cfcnfg_add_adaptation_layer(get_caif_conf(), &param,
+ client_layer, ifindex,
+ headroom, tailroom);
}
EXPORT_SYMBOL(caif_connect_client);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 7912493..8ce9047 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -28,8 +28,8 @@
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);
-#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
-#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
+#define CAIF_DEF_SNDBUF (4096*10)
+#define CAIF_DEF_RCVBUF (4096*100)
/*
* CAIF state is re-using the TCP socket states.
@@ -76,6 +76,7 @@ struct caifsock {
struct caif_connect_request conn_req;
struct mutex readlock;
struct dentry *debugfs_socket_dir;
+ int headroom, tailroom, maxframe;
};
static int rx_flow_is_on(struct caifsock *cf_sk)
@@ -594,27 +595,32 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto err;
noblock = msg->msg_flags & MSG_DONTWAIT;
- buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
-
- ret = -EMSGSIZE;
- if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
- goto err;
-
timeo = sock_sndtimeo(sk, noblock);
timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
1, timeo, &ret);
+ if (ret)
+ goto err;
ret = -EPIPE;
if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
sock_flag(sk, SOCK_DEAD) ||
(sk->sk_shutdown & RCV_SHUTDOWN))
goto err;
+ /* Error if trying to write more than maximum frame size. */
+ ret = -EMSGSIZE;
+ if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
+ goto err;
+
+ buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
+
ret = -ENOMEM;
skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
- if (!skb)
+
+ if (!skb || skb_tailroom(skb) < buffer_size)
goto err;
- skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+
+ skb_reserve(skb, cf_sk->headroom);
ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
@@ -645,7 +651,6 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
long timeo;
err = -EOPNOTSUPP;
-
if (unlikely(msg->msg_flags&MSG_OOB))
goto out_err;
@@ -662,8 +667,8 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
size = len-sent;
- if (size > CAIF_MAX_PAYLOAD_SIZE)
- size = CAIF_MAX_PAYLOAD_SIZE;
+ if (size > cf_sk->maxframe)
+ size = cf_sk->maxframe;
/* If size is more than half of sndbuf, chop up message */
if (size > ((sk->sk_sndbuf >> 1) - 64))
@@ -673,14 +678,14 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
size = SKB_MAX_ALLOC;
skb = sock_alloc_send_skb(sk,
- size + CAIF_NEEDED_HEADROOM
- + CAIF_NEEDED_TAILROOM,
+ size + cf_sk->headroom +
+ cf_sk->tailroom,
msg->msg_flags&MSG_DONTWAIT,
&err);
if (skb == NULL)
goto out_err;
- skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+ skb_reserve(skb, cf_sk->headroom);
/*
* If you pass two values to the sock_alloc_send_skb
* it tries to grab the large buffer with GFP_NOFS
@@ -821,17 +826,15 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
long timeo;
int err;
+ int ifindex, headroom, tailroom;
+ struct net_device *dev;
+
lock_sock(sk);
err = -EAFNOSUPPORT;
if (uaddr->sa_family != AF_CAIF)
goto out;
- err = -ESOCKTNOSUPPORT;
- if (unlikely(!(sk->sk_type == SOCK_STREAM &&
- cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
- sk->sk_type != SOCK_SEQPACKET))
- goto out;
switch (sock->state) {
case SS_UNCONNECTED:
/* Normal case, a fresh connect */
@@ -887,12 +890,23 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
dbfs_atomic_inc(&cnt.num_connect_req);
cf_sk->layer.receive = caif_sktrecv_cb;
err = caif_connect_client(&cf_sk->conn_req,
- &cf_sk->layer);
+ &cf_sk->layer, &ifindex, &headroom, &tailroom);
if (err < 0) {
cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
cf_sk->sk.sk_state = CAIF_DISCONNECTED;
goto out;
}
+ dev = dev_get_by_index(sock_net(sk), ifindex);
+ cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
+ cf_sk->tailroom = tailroom;
+ cf_sk->maxframe = dev->mtu - (headroom + tailroom);
+ dev_put(dev);
+ if (cf_sk->maxframe < 1) {
+ pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n",
+ __func__, dev->mtu);
+ err = -ENODEV;
+ goto out;
+ }
err = -EINPROGRESS;
wait_connect:
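
A worked example of the limits caif_connect() now stores per socket
(illustrative numbers, not from the patch): a datagram link reports a
7 byte protocol head plus 1 byte when the start-of-frame extension is in
use, and a 2 byte tail, so on a 1500 byte link MTU:

    int headroom = 7 + 1;                           /* protohead + STX   */
    int tailroom = 2;
    int maxframe = 1500 - (headroom + tailroom);    /* 1490 byte payload */
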
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 7c81974..1c29189 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
+#include <linux/netdevice.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
@@ -22,6 +23,7 @@
#define PHY_NAME_LEN 20
#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
+#define RFM_FRAGMENT_SIZE 4030
/* Information about CAIF physical interfaces held by Config Module in order
* to manage physical interfaces
@@ -41,6 +43,15 @@ struct cfcnfg_phyinfo {
/* Information about the physical device */
struct dev_info dev_info;
+
+ /* Interface index */
+ int ifindex;
+
+ /* Use Start of frame extension */
+ bool use_stx;
+
+ /* Use Start of frame checksum */
+ bool use_fcs;
};
struct cfcnfg {
@@ -248,9 +259,20 @@ static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}
+int protohead[CFCTRL_SRV_MASK] = {
+ [CFCTRL_SRV_VEI] = 4,
+ [CFCTRL_SRV_DATAGRAM] = 7,
+ [CFCTRL_SRV_UTIL] = 4,
+ [CFCTRL_SRV_RFM] = 3,
+ [CFCTRL_SRV_DBG] = 3,
+};
+
int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
struct cfctrl_link_param *param,
- struct cflayer *adap_layer)
+ struct cflayer *adap_layer,
+ int *ifindex,
+ int *proto_head,
+ int *proto_tail)
{
struct cflayer *frml;
if (adap_layer == NULL) {
@@ -276,6 +298,14 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
param->phyid);
caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
param->phyid);
+
+ *ifindex = cnfg->phy_layers[param->phyid].ifindex;
+ *proto_head =
+ protohead[param->linktype]+
+ (cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);
+
+ *proto_tail = 2;
+
/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
cfctrl_enum_req(cnfg->ctrl, param->phyid);
return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
@@ -297,6 +327,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
struct cfcnfg *cnfg = container_obj(layer);
struct cflayer *servicel = NULL;
struct cfcnfg_phyinfo *phyinfo;
+ struct net_device *netdev;
+
if (adapt_layer == NULL) {
pr_debug("CAIF: %s(): link setup response "
"but no client exist, send linkdown back\n",
@@ -328,7 +360,9 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
break;
case CFCTRL_SRV_RFM:
- servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
+ netdev = phyinfo->dev_info.dev;
+ servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
+ netdev->mtu);
break;
case CFCTRL_SRV_UTIL:
servicel = cfutill_create(channel_id, &phyinfo->dev_info);
@@ -359,8 +393,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
- void *dev, struct cflayer *phy_layer, u16 *phyid,
- enum cfcnfg_phy_preference pref,
+ struct net_device *dev, struct cflayer *phy_layer,
+ u16 *phyid, enum cfcnfg_phy_preference pref,
bool fcs, bool stx)
{
struct cflayer *frml;
@@ -414,6 +448,10 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
cnfg->phy_layers[*phyid].dev_info.dev = dev;
cnfg->phy_layers[*phyid].phy_layer = phy_layer;
cnfg->phy_layers[*phyid].phy_ref_count = 0;
+ cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
+ cnfg->phy_layers[*phyid].use_stx = stx;
+ cnfg->phy_layers[*phyid].use_fcs = fcs;
+
phy_layer->type = phy_type;
frml = cffrml_create(*phyid, fcs);
if (!frml) {
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index fcfda98..563145f 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -19,7 +19,7 @@
#ifdef CAIF_NO_LOOP
static int handle_loop(struct cfctrl *ctrl,
int cmd, struct cfpkt *pkt){
- return CAIF_FAILURE;
+ return -1;
}
#else
static int handle_loop(struct cfctrl *ctrl,
@@ -43,7 +43,7 @@ struct cflayer *cfctrl_create(void)
memset(&dev_info, 0, sizeof(dev_info));
dev_info.id = 0xff;
memset(this, 0, sizeof(*this));
- cfsrvl_init(&this->serv, 0, &dev_info);
+ cfsrvl_init(&this->serv, 0, &dev_info, false);
atomic_set(&this->req_seq_no, 1);
atomic_set(&this->rsp_seq_no, 1);
this->serv.layer.receive = cfctrl_recv;
@@ -395,7 +395,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
cmd = cmdrsp & CFCTRL_CMD_MASK;
if (cmd != CFCTRL_CMD_LINK_ERR
&& CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
- if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
+ if (handle_loop(cfctrl, cmd, pkt) != 0)
cmdrsp |= CFCTRL_ERR_BIT;
}
@@ -647,6 +647,6 @@ found:
default:
break;
}
- return CAIF_SUCCESS;
+ return 0;
}
#endif
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index ab6b6dc..676648c 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -22,7 +22,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(dbg, 0, sizeof(struct cfsrvl));
- cfsrvl_init(dbg, channel_id, dev_info);
+ cfsrvl_init(dbg, channel_id, dev_info, false);
dbg->layer.receive = cfdbgl_receive;
dbg->layer.transmit = cfdbgl_transmit;
snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 5319484..ed9d53a 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -17,6 +17,7 @@
#define DGM_FLOW_OFF 0x81
#define DGM_FLOW_ON 0x80
#define DGM_CTRL_PKT_SIZE 1
+#define DGM_MTU 1500
static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
@@ -30,7 +31,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(dgm, 0, sizeof(struct cfsrvl));
- cfsrvl_init(dgm, channel_id, dev_info);
+ cfsrvl_init(dgm, channel_id, dev_info, true);
dgm->layer.receive = cfdgml_receive;
dgm->layer.transmit = cfdgml_transmit;
snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
@@ -89,6 +90,10 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (!cfsrvl_ready(service, &ret))
return ret;
+	/* STE Modem cannot handle datagrams larger than 1500 bytes */
+ if (cfpkt_getlen(pkt) > DGM_MTU)
+ return -EMSGSIZE;
+
cfpkt_add_head(pkt, &zero, 4);
/* Add info for MUX-layer to route the packet out. */
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index a6fdf89..01f238f 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,8 +9,8 @@
#include <linux/hardirq.h>
#include <net/caif/cfpkt.h>
-#define PKT_PREFIX CAIF_NEEDED_HEADROOM
-#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
+#define PKT_PREFIX 16
+#define PKT_POSTFIX 2
#define PKT_LEN_WHEN_EXTENDING 128
#define PKT_ERROR(pkt, errmsg) do { \
cfpkt_priv(pkt)->erronous = true; \
@@ -338,7 +338,6 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
u16 dstlen;
u16 createlen;
if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
- cfpkt_destroy(addpkt);
return dstpkt;
}
if (expectlen > addlen)
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index fd27b17..4b04d25 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,102 +7,304 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/unaligned/le_byteshift.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
-#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
-
+#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
#define RFM_SEGMENTATION_BIT 0x01
-#define RFM_PAYLOAD 0x00
-#define RFM_CMD_BIT 0x80
-#define RFM_FLOW_OFF 0x81
-#define RFM_FLOW_ON 0x80
-#define RFM_SET_PIN 0x82
-#define RFM_CTRL_PKT_SIZE 1
+#define RFM_HEAD_SIZE 7
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
-static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
-struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
+struct cfrfml {
+ struct cfsrvl serv;
+ struct cfpkt *incomplete_frm;
+ int fragment_size;
+ u8 seghead[6];
+ u16 pdu_size;
+ /* Protects serialized processing of packets */
+ spinlock_t sync;
+};
+
+static void cfrfml_release(struct kref *kref)
+{
+ struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref);
+ struct cfrfml *rfml = container_obj(&srvl->layer);
+
+ if (rfml->incomplete_frm)
+ cfpkt_destroy(rfml->incomplete_frm);
+
+ kfree(srvl);
+}
+
+struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
+ int mtu_size)
{
- struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!rfm) {
+ int tmp;
+ struct cfrfml *this =
+ kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
+
+ if (!this) {
pr_warning("CAIF: %s(): Out of memory\n", __func__);
return NULL;
}
- caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(rfm, 0, sizeof(struct cfsrvl));
- cfsrvl_init(rfm, channel_id, dev_info);
- rfm->layer.modemcmd = cfservl_modemcmd;
- rfm->layer.receive = cfrfml_receive;
- rfm->layer.transmit = cfrfml_transmit;
- snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
- return &rfm->layer;
+
+ cfsrvl_init(&this->serv, channel_id, dev_info, false);
+ this->serv.release = cfrfml_release;
+ this->serv.layer.receive = cfrfml_receive;
+ this->serv.layer.transmit = cfrfml_transmit;
+
+ /* Round down to closest multiple of 16 */
+ tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
+ tmp *= 16;
+
+ this->fragment_size = tmp;
+ spin_lock_init(&this->sync);
+ snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
+ "rfm%d", channel_id);
+
+ return &this->serv.layer;
}
-static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
+ struct cfpkt *pkt, int *err)
{
- return -EPROTO;
+ struct cfpkt *tmppkt;
+ *err = -EPROTO;
+ /* n-th but not last segment */
+
+ if (cfpkt_extr_head(pkt, seghead, 6) < 0)
+ return NULL;
+
+ /* Verify correct header */
+ if (memcmp(seghead, rfml->seghead, 6) != 0)
+ return NULL;
+
+ tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
+ rfml->pdu_size + RFM_HEAD_SIZE);
+
+	/* If cfpkt_append fails, the input pkts are not freed */
+ *err = -ENOMEM;
+ if (tmppkt == NULL)
+ return NULL;
+
+ *err = 0;
+ return tmppkt;
}
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
u8 tmp;
bool segmented;
- int ret;
+ int err;
+ u8 seghead[6];
+ struct cfrfml *rfml;
+ struct cfpkt *tmppkt = NULL;
+
caif_assert(layr->up != NULL);
caif_assert(layr->receive != NULL);
+ rfml = container_obj(layr);
+ spin_lock(&rfml->sync);
+
+ err = -EPROTO;
+ if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
+ goto out;
+ segmented = tmp & RFM_SEGMENTATION_BIT;
+
+ if (segmented) {
+ if (rfml->incomplete_frm == NULL) {
+ /* Initial Segment */
+ if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+ goto out;
+
+ rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
+
+ if (cfpkt_erroneous(pkt))
+ goto out;
+ rfml->incomplete_frm = pkt;
+ pkt = NULL;
+ } else {
+
+ tmppkt = rfm_append(rfml, seghead, pkt, &err);
+ if (tmppkt == NULL)
+ goto out;
+
+ if (cfpkt_erroneous(tmppkt))
+ goto out;
+
+ rfml->incomplete_frm = tmppkt;
+
+
+ if (cfpkt_erroneous(tmppkt))
+ goto out;
+ }
+ err = 0;
+ goto out;
+ }
+
+ if (rfml->incomplete_frm) {
+
+ /* Last Segment */
+ tmppkt = rfm_append(rfml, seghead, pkt, &err);
+ if (tmppkt == NULL)
+ goto out;
+
+ if (cfpkt_erroneous(tmppkt))
+ goto out;
+
+ rfml->incomplete_frm = NULL;
+ pkt = tmppkt;
+ tmppkt = NULL;
+
+ /* Verify that length is correct */
+		err = -EPROTO;
+ if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
+ goto out;
+ }
+
+ err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
+
+out:
+
+ if (err != 0) {
+ if (tmppkt)
+ cfpkt_destroy(tmppkt);
+ if (pkt)
+ cfpkt_destroy(pkt);
+ if (rfml->incomplete_frm)
+ cfpkt_destroy(rfml->incomplete_frm);
+ rfml->incomplete_frm = NULL;
+
+ pr_info("CAIF: %s(): "
+ "Connection error %d triggered on RFM link\n",
+ __func__, err);
+
+ /* Trigger connection error upon failure.*/
+ layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+ rfml->serv.dev_info.id);
+ }
+ spin_unlock(&rfml->sync);
+ return err;
+}
+
+
+static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
+{
+ caif_assert(!cfpkt_getlen(pkt) < rfml->fragment_size);
+
+ /* Add info for MUX-layer to route the packet out. */
+ cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
/*
- * RFM is taking care of segmentation and stripping of
- * segmentation bit.
+ * To optimize alignment, we add up the size of CAIF header before
+ * payload.
*/
- if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
- cfpkt_destroy(pkt);
- return -EPROTO;
- }
- segmented = tmp & RFM_SEGMENTATION_BIT;
- caif_assert(!segmented);
+ cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
+ cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
- ret = layr->up->receive(layr->up, pkt);
- return ret;
+ return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
}
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
- u8 tmp = 0;
- int ret;
- struct cfsrvl *service = container_obj(layr);
+ int err;
+ u8 seg;
+ u8 head[6];
+ struct cfpkt *rearpkt = NULL;
+ struct cfpkt *frontpkt = pkt;
+ struct cfrfml *rfml = container_obj(layr);
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
- if (!cfsrvl_ready(service, &ret))
- return ret;
+ if (!cfsrvl_ready(&rfml->serv, &err))
+ return err;
+
+ err = -EPROTO;
+ if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
+ goto out;
+
+ err = 0;
+ if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
+ err = cfpkt_peek_head(pkt, head, 6);
+
+ if (err < 0)
+ goto out;
+
+ while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
+
+ seg = 1;
+ err = -EPROTO;
+
+ if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+ goto out;
+ /*
+ * On OOM error cfpkt_split returns NULL.
+ *
+ * NOTE: Segmented pdu is not correctly aligned.
+ * This has negative performance impact.
+ */
+
+ rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
+ if (rearpkt == NULL)
+ goto out;
+
+ err = cfrfml_transmit_segment(rfml, frontpkt);
+
+ if (err != 0)
+ goto out;
+ frontpkt = rearpkt;
+ rearpkt = NULL;
+
+ err = -ENOMEM;
+ if (frontpkt == NULL)
+ goto out;
+ err = -EPROTO;
+ if (cfpkt_add_head(frontpkt, head, 6) < 0)
+ goto out;
- if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
- pr_err("CAIF: %s():Packet too large - size=%d\n",
- __func__, cfpkt_getlen(pkt));
- return -EOVERFLOW;
}
- if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
- return -EPROTO;
+
+ seg = 0;
+ err = -EPROTO;
+
+ if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+ goto out;
+
+ err = cfrfml_transmit_segment(rfml, frontpkt);
+
+ frontpkt = NULL;
+out:
+
+ if (err != 0) {
+ pr_info("CAIF: %s(): "
+ "Connection error %d triggered on RFM link\n",
+ __func__, err);
+ /* Trigger connection error upon failure.*/
+
+ layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+ rfml->serv.dev_info.id);
+
+ if (rearpkt)
+ cfpkt_destroy(rearpkt);
+
+ if (frontpkt && frontpkt != pkt) {
+
+ cfpkt_destroy(frontpkt);
+ /*
+			 * The socket layer will free the original packet,
+			 * but this packet may already have been sent and
+			 * freed. So we have to return 0 in this case
+			 * to keep the socket layer from freeing this
+			 * packet again. The shutdown indication returned
+			 * above will invalidate the connection anyhow.
+ */
+ err = 0;
+ }
}
- /* Add info for MUX-layer to route the packet out. */
- cfpkt_info(pkt)->channel_id = service->layer.id;
- /*
- * To optimize alignment, we add up the size of CAIF header before
- * payload.
- */
- cfpkt_info(pkt)->hdr_len = 1;
- cfpkt_info(pkt)->dev_info = &service->dev_info;
- ret = layr->dn->transmit(layr->dn, pkt);
- if (ret < 0)
- cfpkt_extr_head(pkt, &tmp, 1);
- return ret;
+ return err;
}
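
The fragment size the RFM layer segments against is derived from the link
MTU at create time. A sketch of the same arithmetic cfrfml_create() uses,
wrapped in a hypothetical helper:

    #define RFM_HEAD_SIZE 7

    static int rfm_fragment_size(int mtu_size)
    {
            /* leave room for the RFM head and the 6 byte segment header,
             * then round down to the closest multiple of 16 */
            return ((mtu_size - RFM_HEAD_SIZE - 6) / 16) * 16;
    }
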
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 965c5ba..a11fbd6 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -14,7 +14,8 @@
#define container_obj(layr) ((struct cfserl *) layr)
#define CFSERL_STX 0x02
-#define CAIF_MINIUM_PACKET_SIZE 4
+#define SERIAL_MINIUM_PACKET_SIZE 4
+#define SERIAL_MAX_FRAMESIZE 4096
struct cfserl {
struct cflayer layer;
struct cfpkt *incomplete_frm;
@@ -119,8 +120,8 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
/*
* Frame error handling
*/
- if (expectlen < CAIF_MINIUM_PACKET_SIZE
- || expectlen > CAIF_MAX_FRAMESIZE) {
+ if (expectlen < SERIAL_MINIUM_PACKET_SIZE
+ || expectlen > SERIAL_MAX_FRAMESIZE) {
if (!layr->usestx) {
if (pkt != NULL)
cfpkt_destroy(pkt);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 6e5b7079..f40939a 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -24,8 +24,10 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid)
{
struct cfsrvl *service = container_obj(layr);
+
caif_assert(layr->up != NULL);
caif_assert(layr->up->ctrlcmd != NULL);
+
switch (ctrl) {
case CAIF_CTRLCMD_INIT_RSP:
service->open = true;
@@ -89,9 +91,14 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
struct cfsrvl *service = container_obj(layr);
+
caif_assert(layr != NULL);
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
+
+ if (!service->supports_flowctrl)
+ return 0;
+
switch (ctrl) {
case CAIF_MODEMCMD_FLOW_ON_REQ:
{
@@ -152,9 +159,17 @@ void cfservl_destroy(struct cflayer *layer)
kfree(layer);
}
+void cfsrvl_release(struct kref *kref)
+{
+ struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
+ kfree(service);
+}
+
void cfsrvl_init(struct cfsrvl *service,
- u8 channel_id,
- struct dev_info *dev_info)
+ u8 channel_id,
+ struct dev_info *dev_info,
+ bool supports_flowctrl
+ )
{
caif_assert(offsetof(struct cfsrvl, layer) == 0);
service->open = false;
@@ -164,14 +179,11 @@ void cfsrvl_init(struct cfsrvl *service,
service->layer.ctrlcmd = cfservl_ctrlcmd;
service->layer.modemcmd = cfservl_modemcmd;
service->dev_info = *dev_info;
+ service->supports_flowctrl = supports_flowctrl;
+ service->release = cfsrvl_release;
kref_init(&service->ref);
}
-void cfsrvl_release(struct kref *kref)
-{
- struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
- kfree(service);
-}
bool cfsrvl_ready(struct cfsrvl *service, int *err)
{
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 5fd2c9e..02795af 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -31,7 +31,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(util, 0, sizeof(struct cfsrvl));
- cfsrvl_init(util, channel_id, dev_info);
+ cfsrvl_init(util, channel_id, dev_info, true);
util->layer.receive = cfutill_receive;
util->layer.transmit = cfutill_transmit;
snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
@@ -90,12 +90,6 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (!cfsrvl_ready(service, &ret))
return ret;
- if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
- pr_err("CAIF: %s(): packet too large size=%d\n",
- __func__, cfpkt_getlen(pkt));
- return -EOVERFLOW;
- }
-
cfpkt_add_head(pkt, &zero, 1);
/* Add info for MUX-layer to route the packet out. */
info = cfpkt_info(pkt);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index e04f7d9..77cc09f 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -30,7 +30,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(vei, 0, sizeof(struct cfsrvl));
- cfsrvl_init(vei, channel_id, dev_info);
+ cfsrvl_init(vei, channel_id, dev_info, true);
vei->layer.receive = cfvei_receive;
vei->layer.transmit = cfvei_transmit;
snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
@@ -84,11 +84,6 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
return ret;
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
- if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
- pr_warning("CAIF: %s(): Packet too large - size=%d\n",
- __func__, cfpkt_getlen(pkt));
- return -EOVERFLOW;
- }
if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index 89ad4ea..ada6ee2 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -27,7 +27,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(vid, 0, sizeof(struct cfsrvl));
- cfsrvl_init(vid, channel_id, dev_info);
+ cfsrvl_init(vid, channel_id, dev_info, false);
vid->layer.receive = cfvidl_receive;
vid->layer.transmit = cfvidl_transmit;
snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 610966a..4293e19 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -23,7 +23,7 @@
#include <net/caif/caif_dev.h>
/* GPRS PDP connection has MTU to 1500 */
-#define SIZE_MTU 1500
+#define GPRS_PDP_MTU 1500
/* 5 sec. connect timeout */
#define CONNECT_TIMEOUT (5 * HZ)
#define CAIF_NET_DEFAULT_QUEUE_LEN 500
@@ -232,6 +232,8 @@ static int chnl_net_open(struct net_device *dev)
{
struct chnl_net *priv = NULL;
int result = -1;
+ int llifindex, headroom, tailroom, mtu;
+ struct net_device *lldev;
ASSERT_RTNL();
priv = netdev_priv(dev);
if (!priv) {
@@ -241,41 +243,88 @@ static int chnl_net_open(struct net_device *dev)
if (priv->state != CAIF_CONNECTING) {
priv->state = CAIF_CONNECTING;
- result = caif_connect_client(&priv->conn_req, &priv->chnl);
+ result = caif_connect_client(&priv->conn_req, &priv->chnl,
+ &llifindex, &headroom, &tailroom);
if (result != 0) {
- priv->state = CAIF_DISCONNECTED;
pr_debug("CAIF: %s(): err: "
"Unable to register and open device,"
" Err:%d\n",
__func__,
result);
- return result;
+ goto error;
+ }
+
+ lldev = dev_get_by_index(dev_net(dev), llifindex);
+
+ if (lldev == NULL) {
+ pr_debug("CAIF: %s(): no interface?\n", __func__);
+ result = -ENODEV;
+ goto error;
+ }
+
+ dev->needed_tailroom = tailroom + lldev->needed_tailroom;
+ dev->hard_header_len = headroom + lldev->hard_header_len +
+ lldev->needed_tailroom;
+
+ /*
+	 * MTU, head-room etc. are not known before we have a
+	 * CAIF link layer device available. MTU calculation may
+	 * override initial RTNL configuration.
+	 * MTU is the minimum of the current mtu, the link layer mtu less
+	 * CAIF head and tail room, and the GPRS PDP context's max MTU.
+ */
+ mtu = min_t(int, dev->mtu, lldev->mtu - (headroom + tailroom));
+ mtu = min_t(int, GPRS_PDP_MTU, mtu);
+ dev_set_mtu(dev, mtu);
+ dev_put(lldev);
+
+ if (mtu < 100) {
+ pr_warning("CAIF: %s(): "
+ "CAIF Interface MTU too small (%d)\n",
+ __func__, mtu);
+ result = -ENODEV;
+ goto error;
}
}
+ rtnl_unlock(); /* Release RTNL lock during connect wait */
+
result = wait_event_interruptible_timeout(priv->netmgmt_wq,
priv->state != CAIF_CONNECTING,
CONNECT_TIMEOUT);
+ rtnl_lock();
+
if (result == -ERESTARTSYS) {
pr_debug("CAIF: %s(): wait_event_interruptible"
" woken by a signal\n", __func__);
- return -ERESTARTSYS;
+ result = -ERESTARTSYS;
+ goto error;
}
+
if (result == 0) {
pr_debug("CAIF: %s(): connect timeout\n", __func__);
caif_disconnect_client(&priv->chnl);
priv->state = CAIF_DISCONNECTED;
pr_debug("CAIF: %s(): state disconnected\n", __func__);
- return -ETIMEDOUT;
+ result = -ETIMEDOUT;
+ goto error;
}
if (priv->state != CAIF_CONNECTED) {
pr_debug("CAIF: %s(): connect failed\n", __func__);
- return -ECONNREFUSED;
+ result = -ECONNREFUSED;
+ goto error;
}
pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
return 0;
+
+error:
+ caif_disconnect_client(&priv->chnl);
+ priv->state = CAIF_DISCONNECTED;
+ pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ return result;
+
}
static int chnl_net_stop(struct net_device *dev)
@@ -321,9 +370,7 @@ static void ipcaif_net_setup(struct net_device *dev)
dev->destructor = free_netdev;
dev->flags |= IFF_NOARP;
dev->flags |= IFF_POINTOPOINT;
- dev->needed_headroom = CAIF_NEEDED_HEADROOM;
- dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
- dev->mtu = SIZE_MTU;
+ dev->mtu = GPRS_PDP_MTU;
dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
priv = netdev_priv(dev);
diff --git a/net/core/dev.c b/net/core/dev.c
index a1abc10..e85cc5f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1537,7 +1537,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
if (net_ratelimit())
printk(KERN_CRIT "protocol %04x is "
"buggy, dev %s\n",
- skb2->protocol, dev->name);
+ ntohs(skb2->protocol),
+ dev->name);
skb_reset_network_header(skb2);
}
@@ -1895,6 +1896,22 @@ static inline void skb_orphan_try(struct sk_buff *skb)
skb_orphan(skb);
}
+/*
+ * Returns true if either:
+ * 1. skb has frag_list and the device doesn't support FRAGLIST, or
+ * 2. skb is fragmented and the device does not support SG, or if
+ * at least one of fragments is in highmem and device does not
+ * support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return skb_is_nonlinear(skb) &&
+ ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+ (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+ illegal_highdma(dev, skb))));
+}
+
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
@@ -1919,6 +1936,22 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
goto out_kfree_skb;
if (skb->next)
goto gso;
+ } else {
+ if (skb_needs_linearize(skb, dev) &&
+ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+ /* If packet is not checksummed and device does not
+ * support checksumming for this protocol, complete
+ * checksumming here.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_set_transport_header(skb, skb->csum_start -
+ skb_headroom(skb));
+ if (!dev_can_checksum(dev, skb) &&
+ skb_checksum_help(skb))
+ goto out_kfree_skb;
+ }
}
rc = ops->ndo_start_xmit(skb, dev);
@@ -2089,22 +2122,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
-/*
- * Returns true if either:
- * 1. skb has frag_list and the device doesn't support FRAGLIST, or
- * 2. skb is fragmented and the device does not support SG, or if
- * at least one of fragments is in highmem and device does not
- * support DMA from it.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
- struct net_device *dev)
-{
- return skb_is_nonlinear(skb) &&
- ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
- (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
- illegal_highdma(dev, skb))));
-}
-
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
@@ -2137,25 +2154,6 @@ int dev_queue_xmit(struct sk_buff *skb)
struct Qdisc *q;
int rc = -ENOMEM;
- /* GSO will handle the following emulations directly. */
- if (netif_needs_gso(dev, skb))
- goto gso;
-
- /* Convert a paged skb to linear, if required */
- if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
- goto out_kfree_skb;
-
- /* If packet is not checksummed and device does not support
- * checksumming for this protocol, complete checksumming here.
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
- if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
- goto out_kfree_skb;
- }
-
-gso:
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
@@ -2214,7 +2212,6 @@ gso:
rc = -ENETDOWN;
rcu_read_unlock_bh();
-out_kfree_skb:
kfree_skb(skb);
return rc;
out:
@@ -2703,6 +2700,7 @@ void netif_nit_deliver(struct sk_buff *skb)
* netdev_rx_handler_register - register receive handler
* @dev: device to register a handler for
* @rx_handler: receive handler to register
+ * @rx_handler_data: data pointer that is used by rx handler
*
 * Register a receive handler for a device. This handler will then be
* called from __netif_receive_skb. A negative errno code is returned
@@ -2711,13 +2709,15 @@ void netif_nit_deliver(struct sk_buff *skb)
* The caller must hold the rtnl_mutex.
*/
int netdev_rx_handler_register(struct net_device *dev,
- rx_handler_func_t *rx_handler)
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data)
{
ASSERT_RTNL();
if (dev->rx_handler)
return -EBUSY;
+ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
rcu_assign_pointer(dev->rx_handler, rx_handler);
return 0;
@@ -2737,6 +2737,7 @@ void netdev_rx_handler_unregister(struct net_device *dev)
ASSERT_RTNL();
rcu_assign_pointer(dev->rx_handler, NULL);
+ rcu_assign_pointer(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
@@ -2761,7 +2762,8 @@ int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
if (master->priv_flags & IFF_MASTER_ARPMON)
dev->last_rx = jiffies;
- if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+ if ((master->priv_flags & IFF_MASTER_ALB) &&
+ (master->priv_flags & IFF_BRIDGE_PORT)) {
/* Do address unmangle. The local destination address
* will be always the one master has. Provides the right
* functionality in a bridge.
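
netdev_rx_handler_register() now also carries an opaque rx_handler_data
pointer; this is where the bridge keeps its net_bridge_port. A sketch of a
hypothetical user (handler and helper names are not from the patch); as the
kerneldoc above notes, the caller must hold the rtnl lock:

    static struct sk_buff *my_handle_frame(struct sk_buff *skb)
    {
            void *priv = rcu_dereference(skb->dev->rx_handler_data);

            (void)priv;             /* a real handler would use this */
            return skb;             /* pass the frame on unchanged   */
    }

    static int my_attach(struct net_device *dev, void *priv)
    {
            int err;

            rtnl_lock();
            err = netdev_rx_handler_register(dev, my_handle_frame, priv);
            rtnl_unlock();
            return err;
    }
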
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964..072d1d3 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -144,31 +144,13 @@ u32 ethtool_op_get_flags(struct net_device *dev)
}
EXPORT_SYMBOL(ethtool_op_get_flags);
-int ethtool_op_set_flags(struct net_device *dev, u32 data)
+int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
{
- const struct ethtool_ops *ops = dev->ethtool_ops;
- unsigned long features = dev->features;
-
- if (data & ETH_FLAG_LRO)
- features |= NETIF_F_LRO;
- else
- features &= ~NETIF_F_LRO;
-
- if (data & ETH_FLAG_NTUPLE) {
- if (!ops->set_rx_ntuple)
- return -EOPNOTSUPP;
- features |= NETIF_F_NTUPLE;
- } else {
- /* safe to clear regardless */
- features &= ~NETIF_F_NTUPLE;
- }
-
- if (data & ETH_FLAG_RXHASH)
- features |= NETIF_F_RXHASH;
- else
- features &= ~NETIF_F_RXHASH;
+ if (data & ~supported)
+ return -EINVAL;
- dev->features = features;
+ dev->features = ((dev->features & ~flags_dup_features) |
+ (data & flags_dup_features));
return 0;
}
EXPORT_SYMBOL(ethtool_op_set_flags);
@@ -376,6 +358,80 @@ err_out:
return ret;
}
+static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
+ void __user *useraddr)
+{
+ struct ethtool_rxfh_indir *indir;
+ u32 table_size;
+ size_t full_size;
+ int ret;
+
+ if (!dev->ethtool_ops->get_rxfh_indir)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&table_size,
+ useraddr + offsetof(struct ethtool_rxfh_indir, size),
+ sizeof(table_size)))
+ return -EFAULT;
+
+ if (table_size >
+ (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
+ return -ENOMEM;
+ full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
+ indir = kmalloc(full_size, GFP_USER);
+ if (!indir)
+ return -ENOMEM;
+
+ indir->cmd = ETHTOOL_GRXFHINDIR;
+ indir->size = table_size;
+ ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(useraddr, indir, full_size))
+ ret = -EFAULT;
+
+out:
+ kfree(indir);
+ return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
+ void __user *useraddr)
+{
+ struct ethtool_rxfh_indir *indir;
+ u32 table_size;
+ size_t full_size;
+ int ret;
+
+ if (!dev->ethtool_ops->set_rxfh_indir)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&table_size,
+ useraddr + offsetof(struct ethtool_rxfh_indir, size),
+ sizeof(table_size)))
+ return -EFAULT;
+
+ if (table_size >
+ (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
+ return -ENOMEM;
+ full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
+ indir = kmalloc(full_size, GFP_USER);
+ if (!indir)
+ return -ENOMEM;
+
+ if (copy_from_user(indir, useraddr, full_size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
+
+out:
+ kfree(indir);
+ return ret;
+}
+
static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
struct ethtool_rx_ntuple_flow_spec *spec,
struct ethtool_rx_ntuple_flow_spec_container *fsc)
@@ -1544,6 +1600,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GSSET_INFO:
rc = ethtool_get_sset_info(dev, useraddr);
break;
+ case ETHTOOL_GRXFHINDIR:
+ rc = ethtool_get_rxfh_indir(dev, useraddr);
+ break;
+ case ETHTOOL_SRXFHINDIR:
+ rc = ethtool_set_rxfh_indir(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
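
With the new ethtool_op_set_flags() signature a driver passes the mask of
ETH_FLAG_* bits it actually supports, and the core rejects anything outside
that mask with -EINVAL. Sketch of a hypothetical driver hook:

    static int mydrv_set_flags(struct net_device *dev, u32 data)
    {
            /* this driver only supports LRO and receive hashing */
            return ethtool_op_set_flags(dev, data,
                                        ETH_FLAG_LRO | ETH_FLAG_RXHASH);
    }
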
diff --git a/net/core/filter.c b/net/core/filter.c
index da69fb7..52b051f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -128,87 +128,87 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
fentry = &filter[pc];
switch (fentry->code) {
- case BPF_ALU|BPF_ADD|BPF_X:
+ case BPF_S_ALU_ADD_X:
A += X;
continue;
- case BPF_ALU|BPF_ADD|BPF_K:
+ case BPF_S_ALU_ADD_K:
A += fentry->k;
continue;
- case BPF_ALU|BPF_SUB|BPF_X:
+ case BPF_S_ALU_SUB_X:
A -= X;
continue;
- case BPF_ALU|BPF_SUB|BPF_K:
+ case BPF_S_ALU_SUB_K:
A -= fentry->k;
continue;
- case BPF_ALU|BPF_MUL|BPF_X:
+ case BPF_S_ALU_MUL_X:
A *= X;
continue;
- case BPF_ALU|BPF_MUL|BPF_K:
+ case BPF_S_ALU_MUL_K:
A *= fentry->k;
continue;
- case BPF_ALU|BPF_DIV|BPF_X:
+ case BPF_S_ALU_DIV_X:
if (X == 0)
return 0;
A /= X;
continue;
- case BPF_ALU|BPF_DIV|BPF_K:
+ case BPF_S_ALU_DIV_K:
A /= fentry->k;
continue;
- case BPF_ALU|BPF_AND|BPF_X:
+ case BPF_S_ALU_AND_X:
A &= X;
continue;
- case BPF_ALU|BPF_AND|BPF_K:
+ case BPF_S_ALU_AND_K:
A &= fentry->k;
continue;
- case BPF_ALU|BPF_OR|BPF_X:
+ case BPF_S_ALU_OR_X:
A |= X;
continue;
- case BPF_ALU|BPF_OR|BPF_K:
+ case BPF_S_ALU_OR_K:
A |= fentry->k;
continue;
- case BPF_ALU|BPF_LSH|BPF_X:
+ case BPF_S_ALU_LSH_X:
A <<= X;
continue;
- case BPF_ALU|BPF_LSH|BPF_K:
+ case BPF_S_ALU_LSH_K:
A <<= fentry->k;
continue;
- case BPF_ALU|BPF_RSH|BPF_X:
+ case BPF_S_ALU_RSH_X:
A >>= X;
continue;
- case BPF_ALU|BPF_RSH|BPF_K:
+ case BPF_S_ALU_RSH_K:
A >>= fentry->k;
continue;
- case BPF_ALU|BPF_NEG:
+ case BPF_S_ALU_NEG:
A = -A;
continue;
- case BPF_JMP|BPF_JA:
+ case BPF_S_JMP_JA:
pc += fentry->k;
continue;
- case BPF_JMP|BPF_JGT|BPF_K:
+ case BPF_S_JMP_JGT_K:
pc += (A > fentry->k) ? fentry->jt : fentry->jf;
continue;
- case BPF_JMP|BPF_JGE|BPF_K:
+ case BPF_S_JMP_JGE_K:
pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
continue;
- case BPF_JMP|BPF_JEQ|BPF_K:
+ case BPF_S_JMP_JEQ_K:
pc += (A == fentry->k) ? fentry->jt : fentry->jf;
continue;
- case BPF_JMP|BPF_JSET|BPF_K:
+ case BPF_S_JMP_JSET_K:
pc += (A & fentry->k) ? fentry->jt : fentry->jf;
continue;
- case BPF_JMP|BPF_JGT|BPF_X:
+ case BPF_S_JMP_JGT_X:
pc += (A > X) ? fentry->jt : fentry->jf;
continue;
- case BPF_JMP|BPF_JGE|BPF_X:
+ case BPF_S_JMP_JGE_X:
pc += (A >= X) ? fentry->jt : fentry->jf;
continue;
- case BPF_JMP|BPF_JEQ|BPF_X:
+ case BPF_S_JMP_JEQ_X:
pc += (A == X) ? fentry->jt : fentry->jf;
continue;
- case BPF_JMP|BPF_JSET|BPF_X:
+ case BPF_S_JMP_JSET_X:
pc += (A & X) ? fentry->jt : fentry->jf;
continue;
- case BPF_LD|BPF_W|BPF_ABS:
+ case BPF_S_LD_W_ABS:
k = fentry->k;
load_w:
ptr = load_pointer(skb, k, 4, &tmp);
@@ -217,7 +217,7 @@ load_w:
continue;
}
break;
- case BPF_LD|BPF_H|BPF_ABS:
+ case BPF_S_LD_H_ABS:
k = fentry->k;
load_h:
ptr = load_pointer(skb, k, 2, &tmp);
@@ -226,7 +226,7 @@ load_h:
continue;
}
break;
- case BPF_LD|BPF_B|BPF_ABS:
+ case BPF_S_LD_B_ABS:
k = fentry->k;
load_b:
ptr = load_pointer(skb, k, 1, &tmp);
@@ -235,54 +235,54 @@ load_b:
continue;
}
break;
- case BPF_LD|BPF_W|BPF_LEN:
+ case BPF_S_LD_W_LEN:
A = skb->len;
continue;
- case BPF_LDX|BPF_W|BPF_LEN:
+ case BPF_S_LDX_W_LEN:
X = skb->len;
continue;
- case BPF_LD|BPF_W|BPF_IND:
+ case BPF_S_LD_W_IND:
k = X + fentry->k;
goto load_w;
- case BPF_LD|BPF_H|BPF_IND:
+ case BPF_S_LD_H_IND:
k = X + fentry->k;
goto load_h;
- case BPF_LD|BPF_B|BPF_IND:
+ case BPF_S_LD_B_IND:
k = X + fentry->k;
goto load_b;
- case BPF_LDX|BPF_B|BPF_MSH:
+ case BPF_S_LDX_B_MSH:
ptr = load_pointer(skb, fentry->k, 1, &tmp);
if (ptr != NULL) {
X = (*(u8 *)ptr & 0xf) << 2;
continue;
}
return 0;
- case BPF_LD|BPF_IMM:
+ case BPF_S_LD_IMM:
A = fentry->k;
continue;
- case BPF_LDX|BPF_IMM:
+ case BPF_S_LDX_IMM:
X = fentry->k;
continue;
- case BPF_LD|BPF_MEM:
+ case BPF_S_LD_MEM:
A = mem[fentry->k];
continue;
- case BPF_LDX|BPF_MEM:
+ case BPF_S_LDX_MEM:
X = mem[fentry->k];
continue;
- case BPF_MISC|BPF_TAX:
+ case BPF_S_MISC_TAX:
X = A;
continue;
- case BPF_MISC|BPF_TXA:
+ case BPF_S_MISC_TXA:
A = X;
continue;
- case BPF_RET|BPF_K:
+ case BPF_S_RET_K:
return fentry->k;
- case BPF_RET|BPF_A:
+ case BPF_S_RET_A:
return A;
- case BPF_ST:
+ case BPF_S_ST:
mem[fentry->k] = A;
continue;
- case BPF_STX:
+ case BPF_S_STX:
mem[fentry->k] = X;
continue;
default:
@@ -390,53 +390,128 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
/* Only allow valid instructions */
switch (ftest->code) {
case BPF_ALU|BPF_ADD|BPF_K:
+ ftest->code = BPF_S_ALU_ADD_K;
+ break;
case BPF_ALU|BPF_ADD|BPF_X:
+ ftest->code = BPF_S_ALU_ADD_X;
+ break;
case BPF_ALU|BPF_SUB|BPF_K:
+ ftest->code = BPF_S_ALU_SUB_K;
+ break;
case BPF_ALU|BPF_SUB|BPF_X:
+ ftest->code = BPF_S_ALU_SUB_X;
+ break;
case BPF_ALU|BPF_MUL|BPF_K:
+ ftest->code = BPF_S_ALU_MUL_K;
+ break;
case BPF_ALU|BPF_MUL|BPF_X:
+ ftest->code = BPF_S_ALU_MUL_X;
+ break;
case BPF_ALU|BPF_DIV|BPF_X:
+ ftest->code = BPF_S_ALU_DIV_X;
+ break;
case BPF_ALU|BPF_AND|BPF_K:
+ ftest->code = BPF_S_ALU_AND_K;
+ break;
case BPF_ALU|BPF_AND|BPF_X:
+ ftest->code = BPF_S_ALU_AND_X;
+ break;
case BPF_ALU|BPF_OR|BPF_K:
+ ftest->code = BPF_S_ALU_OR_K;
+ break;
case BPF_ALU|BPF_OR|BPF_X:
+ ftest->code = BPF_S_ALU_OR_X;
+ break;
case BPF_ALU|BPF_LSH|BPF_K:
+ ftest->code = BPF_S_ALU_LSH_K;
+ break;
case BPF_ALU|BPF_LSH|BPF_X:
+ ftest->code = BPF_S_ALU_LSH_X;
+ break;
case BPF_ALU|BPF_RSH|BPF_K:
+ ftest->code = BPF_S_ALU_RSH_K;
+ break;
case BPF_ALU|BPF_RSH|BPF_X:
+ ftest->code = BPF_S_ALU_RSH_X;
+ break;
case BPF_ALU|BPF_NEG:
+ ftest->code = BPF_S_ALU_NEG;
+ break;
case BPF_LD|BPF_W|BPF_ABS:
+ ftest->code = BPF_S_LD_W_ABS;
+ break;
case BPF_LD|BPF_H|BPF_ABS:
+ ftest->code = BPF_S_LD_H_ABS;
+ break;
case BPF_LD|BPF_B|BPF_ABS:
+ ftest->code = BPF_S_LD_B_ABS;
+ break;
case BPF_LD|BPF_W|BPF_LEN:
+ ftest->code = BPF_S_LD_W_LEN;
+ break;
case BPF_LD|BPF_W|BPF_IND:
+ ftest->code = BPF_S_LD_W_IND;
+ break;
case BPF_LD|BPF_H|BPF_IND:
+ ftest->code = BPF_S_LD_H_IND;
+ break;
case BPF_LD|BPF_B|BPF_IND:
+ ftest->code = BPF_S_LD_B_IND;
+ break;
case BPF_LD|BPF_IMM:
+ ftest->code = BPF_S_LD_IMM;
+ break;
case BPF_LDX|BPF_W|BPF_LEN:
+ ftest->code = BPF_S_LDX_W_LEN;
+ break;
case BPF_LDX|BPF_B|BPF_MSH:
+ ftest->code = BPF_S_LDX_B_MSH;
+ break;
case BPF_LDX|BPF_IMM:
+ ftest->code = BPF_S_LDX_IMM;
+ break;
case BPF_MISC|BPF_TAX:
+ ftest->code = BPF_S_MISC_TAX;
+ break;
case BPF_MISC|BPF_TXA:
+ ftest->code = BPF_S_MISC_TXA;
+ break;
case BPF_RET|BPF_K:
+ ftest->code = BPF_S_RET_K;
+ break;
case BPF_RET|BPF_A:
+ ftest->code = BPF_S_RET_A;
break;
/* Some instructions need special checks */
- case BPF_ALU|BPF_DIV|BPF_K:
/* check for division by zero */
+ case BPF_ALU|BPF_DIV|BPF_K:
if (ftest->k == 0)
return -EINVAL;
+ ftest->code = BPF_S_ALU_DIV_K;
break;
+ /* check for invalid memory addresses */
case BPF_LD|BPF_MEM:
+ if (ftest->k >= BPF_MEMWORDS)
+ return -EINVAL;
+ ftest->code = BPF_S_LD_MEM;
+ break;
case BPF_LDX|BPF_MEM:
+ if (ftest->k >= BPF_MEMWORDS)
+ return -EINVAL;
+ ftest->code = BPF_S_LDX_MEM;
+ break;
case BPF_ST:
+ if (ftest->k >= BPF_MEMWORDS)
+ return -EINVAL;
+ ftest->code = BPF_S_ST;
+ break;
case BPF_STX:
- /* check for invalid memory addresses */
if (ftest->k >= BPF_MEMWORDS)
return -EINVAL;
+ ftest->code = BPF_S_STX;
break;
case BPF_JMP|BPF_JA:
@@ -447,28 +522,63 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
*/
if (ftest->k >= (unsigned)(flen-pc-1))
return -EINVAL;
+ ftest->code = BPF_S_JMP_JA;
break;
case BPF_JMP|BPF_JEQ|BPF_K:
+ ftest->code = BPF_S_JMP_JEQ_K;
+ break;
case BPF_JMP|BPF_JEQ|BPF_X:
+ ftest->code = BPF_S_JMP_JEQ_X;
+ break;
case BPF_JMP|BPF_JGE|BPF_K:
+ ftest->code = BPF_S_JMP_JGE_K;
+ break;
case BPF_JMP|BPF_JGE|BPF_X:
+ ftest->code = BPF_S_JMP_JGE_X;
+ break;
case BPF_JMP|BPF_JGT|BPF_K:
+ ftest->code = BPF_S_JMP_JGT_K;
+ break;
case BPF_JMP|BPF_JGT|BPF_X:
+ ftest->code = BPF_S_JMP_JGT_X;
+ break;
case BPF_JMP|BPF_JSET|BPF_K:
+ ftest->code = BPF_S_JMP_JSET_K;
+ break;
case BPF_JMP|BPF_JSET|BPF_X:
+ ftest->code = BPF_S_JMP_JSET_X;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
/* for conditionals both must be safe */
+ switch (ftest->code) {
+ case BPF_S_JMP_JEQ_K:
+ case BPF_S_JMP_JEQ_X:
+ case BPF_S_JMP_JGE_K:
+ case BPF_S_JMP_JGE_X:
+ case BPF_S_JMP_JGT_K:
+ case BPF_S_JMP_JGT_X:
+ case BPF_S_JMP_JSET_X:
+ case BPF_S_JMP_JSET_K:
if (pc + ftest->jt + 1 >= flen ||
pc + ftest->jf + 1 >= flen)
return -EINVAL;
- break;
+ }
+ }
+ /* last instruction must be a RET code */
+ switch (filter[flen - 1].code) {
+ case BPF_S_RET_K:
+ case BPF_S_RET_A:
+ return 0;
+ break;
default:
return -EINVAL;
}
- }
-
- return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
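
sk_chk_filter() now rewrites each accepted opcode into a dense BPF_S_* value
at attach time, so sk_run_filter() can switch directly on it, and any program
whose last instruction is not a RET is rejected. A small classic BPF program
that passes the validator (user-space sketch, attached with SO_ATTACH_FILTER):

    struct sock_filter prog[] = {
            { BPF_LD  | BPF_W | BPF_LEN, 0, 0, 0 }, /* A = skb->len   */
            { BPF_RET | BPF_A,           0, 0, 0 }, /* accept A bytes */
    };
    struct sock_fprog fprog = {
            .len    = sizeof(prog) / sizeof(prog[0]),
            .filter = prog,
    };
    /* setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog)); */
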
diff --git a/net/core/flow.c b/net/core/flow.c
index 1619006..8c7c91a 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -222,7 +222,7 @@ flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
unsigned int hash;
local_bh_disable();
- fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+ fcp = this_cpu_ptr(fc->percpu);
fle = NULL;
flo = NULL;
@@ -302,7 +302,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
LIST_HEAD(gc_list);
int i, deleted = 0;
- fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+ fcp = this_cpu_ptr(fc->percpu);
for (i = 0; i < flow_cache_hash_size(fc); i++) {
hlist_for_each_entry_safe(fle, entry, tmp,
&fcp->hash_table[i], u.hlist) {
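
The flow cache change is a shorthand only: both forms below resolve the same
per-cpu slot, but this_cpu_ptr() lets the architecture use its per-cpu
addressing directly instead of computing smp_processor_id() first.

    fcp = per_cpu_ptr(fc->percpu, smp_processor_id());      /* old */
    fcp = this_cpu_ptr(fc->percpu);                          /* new */
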
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e034342..ca6dc31 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -261,6 +261,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
unsigned long tries;
struct net_device *dev = np->dev;
const struct net_device_ops *ops = dev->netdev_ops;
+ /* It is up to the caller to keep npinfo alive. */
struct netpoll_info *npinfo = np->dev->npinfo;
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -692,29 +693,27 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
return -1;
}
-int netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np)
{
- struct net_device *ndev = NULL;
- struct in_device *in_dev;
+ struct net_device *ndev = np->dev;
struct netpoll_info *npinfo;
- struct netpoll *npe, *tmp;
+ const struct net_device_ops *ops;
unsigned long flags;
int err;
- if (np->dev_name)
- ndev = dev_get_by_name(&init_net, np->dev_name);
- if (!ndev) {
- printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
+ if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !ndev->netdev_ops->ndo_poll_controller) {
+ printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
- return -ENODEV;
+ err = -ENOTSUPP;
+ goto out;
}
- np->dev = ndev;
if (!ndev->npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
if (!npinfo) {
err = -ENOMEM;
- goto put;
+ goto out;
}
npinfo->rx_flags = 0;
@@ -726,6 +725,13 @@ int netpoll_setup(struct netpoll *np)
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
+
+ ops = np->dev->netdev_ops;
+ if (ops->ndo_netpoll_setup) {
+ err = ops->ndo_netpoll_setup(ndev, npinfo);
+ if (err)
+ goto free_npinfo;
+ }
} else {
npinfo = ndev->npinfo;
atomic_inc(&npinfo->refcnt);
@@ -733,12 +739,37 @@ int netpoll_setup(struct netpoll *np)
npinfo->netpoll = np;
- if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !ndev->netdev_ops->ndo_poll_controller) {
- printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+ if (np->rx_hook) {
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+ list_add_tail(&np->rx, &npinfo->rx_np);
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ }
+
+ /* last thing to do is link it to the net device structure */
+ rcu_assign_pointer(ndev->npinfo, npinfo);
+
+ return 0;
+
+free_npinfo:
+ kfree(npinfo);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(__netpoll_setup);
+
+int netpoll_setup(struct netpoll *np)
+{
+ struct net_device *ndev = NULL;
+ struct in_device *in_dev;
+ int err;
+
+ if (np->dev_name)
+ ndev = dev_get_by_name(&init_net, np->dev_name);
+ if (!ndev) {
+ printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
np->name, np->dev_name);
- err = -ENOTSUPP;
- goto release;
+ return -ENODEV;
}
if (!netif_running(ndev)) {
@@ -754,7 +785,7 @@ int netpoll_setup(struct netpoll *np)
if (err) {
printk(KERN_ERR "%s: failed to open %s\n",
np->name, ndev->name);
- goto release;
+ goto put;
}
atleast = jiffies + HZ/10;
@@ -791,7 +822,7 @@ int netpoll_setup(struct netpoll *np)
printk(KERN_ERR "%s: no IP address for %s, aborting\n",
np->name, np->dev_name);
err = -EDESTADDRREQ;
- goto release;
+ goto put;
}
np->local_ip = in_dev->ifa_list->ifa_local;
@@ -799,34 +830,20 @@ int netpoll_setup(struct netpoll *np)
printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
}
- if (np->rx_hook) {
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- npinfo->rx_flags |= NETPOLL_RX_ENABLED;
- list_add_tail(&np->rx, &npinfo->rx_np);
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
- }
+ np->dev = ndev;
/* fill up the skb queue */
refill_skbs();
- /* last thing to do is link it to the net device structure */
- ndev->npinfo = npinfo;
+ rtnl_lock();
+ err = __netpoll_setup(np);
+ rtnl_unlock();
- /* avoid racing with NAPI reading npinfo */
- synchronize_rcu();
+ if (err)
+ goto put;
return 0;
- release:
- if (!ndev->npinfo) {
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
- npe->dev = NULL;
- }
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
- kfree(npinfo);
- }
put:
dev_put(ndev);
return err;
@@ -839,42 +856,56 @@ static int __init netpoll_init(void)
}
core_initcall(netpoll_init);
-void netpoll_cleanup(struct netpoll *np)
+void __netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
unsigned long flags;
- if (np->dev) {
- npinfo = np->dev->npinfo;
- if (npinfo) {
- if (!list_empty(&npinfo->rx_np)) {
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- list_del(&np->rx);
- if (list_empty(&npinfo->rx_np))
- npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
- }
+ npinfo = np->dev->npinfo;
+ if (!npinfo)
+ return;
- if (atomic_dec_and_test(&npinfo->refcnt)) {
- const struct net_device_ops *ops;
- skb_queue_purge(&npinfo->arp_tx);
- skb_queue_purge(&npinfo->txq);
- cancel_rearming_delayed_work(&npinfo->tx_work);
-
- /* clean after last, unfinished work */
- __skb_queue_purge(&npinfo->txq);
- kfree(npinfo);
- ops = np->dev->netdev_ops;
- if (ops->ndo_netpoll_cleanup)
- ops->ndo_netpoll_cleanup(np->dev);
- else
- np->dev->npinfo = NULL;
- }
- }
+ if (!list_empty(&npinfo->rx_np)) {
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ list_del(&np->rx);
+ if (list_empty(&npinfo->rx_np))
+ npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ }
+
+ if (atomic_dec_and_test(&npinfo->refcnt)) {
+ const struct net_device_ops *ops;
+
+ ops = np->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(np->dev);
- dev_put(np->dev);
+ rcu_assign_pointer(np->dev->npinfo, NULL);
+
+ /* avoid racing with NAPI reading npinfo */
+ synchronize_rcu_bh();
+
+ skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->txq);
+ cancel_rearming_delayed_work(&npinfo->tx_work);
+
+ /* clean after last, unfinished work */
+ __skb_queue_purge(&npinfo->txq);
+ kfree(npinfo);
}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+
+void netpoll_cleanup(struct netpoll *np)
+{
+ if (!np->dev)
+ return;
+
+ rtnl_lock();
+ __netpoll_cleanup(np);
+ rtnl_unlock();
+ dev_put(np->dev);
np->dev = NULL;
}
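The reworked setup/cleanup above follows the usual RCU publish-and-retire discipline: the fully initialised npinfo is published with rcu_assign_pointer(), and on teardown the pointer is cleared first and synchronize_rcu_bh() waits out softirq readers before the memory is freed. A stripped-down sketch of that ordering, with a hypothetical structure rather than netpoll itself:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_info {
        int data;
};

static struct demo_info *demo_shared;   /* read from softirq context via rcu_dereference_bh() */

static int demo_publish(void)
{
        struct demo_info *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return -ENOMEM;
        p->data = 42;
        /* Publish only after the object is fully initialised. */
        rcu_assign_pointer(demo_shared, p);
        return 0;
}

static void demo_retire(void)
{
        struct demo_info *p = demo_shared;

        rcu_assign_pointer(demo_shared, NULL);
        /* Wait for softirq readers that may still hold the old pointer. */
        synchronize_rcu_bh();
        kfree(p);
}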
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6428653..1ee2ebd 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -115,6 +115,9 @@
* command by Adit Ranadive <adit.262@gmail.com>
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sys.h>
#include <linux/types.h>
#include <linux/module.h>
@@ -174,6 +177,8 @@
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)
+#define func_enter() pr_debug("entering %s\n", __func__);
+
/* Device flag bits */
#define F_IPSRC_RND (1<<0) /* IP-Src Random */
#define F_IPDST_RND (1<<1) /* IP-Dst Random */
@@ -424,7 +429,8 @@ static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
}
static const char version[] =
- "pktgen " VERSION ": Packet Generator for packet performance testing.\n";
+ "Packet Generator for packet performance testing. "
+ "Version: " VERSION "\n";
static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
@@ -495,7 +501,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
pktgen_reset_all_threads();
else
- printk(KERN_WARNING "pktgen: Unknown command: %s\n", data);
+ pr_warning("Unknown command: %s\n", data);
err = count;
@@ -852,14 +858,14 @@ static ssize_t pktgen_if_write(struct file *file,
pg_result = &(pkt_dev->result[0]);
if (count < 1) {
- printk(KERN_WARNING "pktgen: wrong command format\n");
+ pr_warning("wrong command format\n");
return -EINVAL;
}
max = count - i;
tmp = count_trail_chars(&user_buffer[i], max);
if (tmp < 0) {
- printk(KERN_WARNING "pktgen: illegal format\n");
+ pr_warning("illegal format\n");
return tmp;
}
i += tmp;
@@ -990,9 +996,7 @@ static ssize_t pktgen_if_write(struct file *file,
return len;
pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
if (debug)
- printk(KERN_INFO
- "pktgen: Delay set at: %llu ns\n",
- pkt_dev->delay);
+ pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
sprintf(pg_result, "OK: rate=%lu", value);
return count;
@@ -1007,9 +1011,7 @@ static ssize_t pktgen_if_write(struct file *file,
return len;
pkt_dev->delay = NSEC_PER_SEC/value;
if (debug)
- printk(KERN_INFO
- "pktgen: Delay set at: %llu ns\n",
- pkt_dev->delay);
+ pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
sprintf(pg_result, "OK: rate=%lu", value);
return count;
@@ -1815,7 +1817,7 @@ static ssize_t pktgen_thread_write(struct file *file,
name, (unsigned long)count);
if (!t) {
- printk(KERN_ERR "pktgen: ERROR: No thread\n");
+ pr_err("ERROR: No thread\n");
ret = -EINVAL;
goto out;
}
@@ -1908,7 +1910,7 @@ static void pktgen_mark_device(const char *ifname)
int i = 0;
mutex_lock(&pktgen_thread_lock);
- pr_debug("pktgen: pktgen_mark_device marking %s for removal\n", ifname);
+ pr_debug("%s: marking %s for removal\n", __func__, ifname);
while (1) {
@@ -1917,15 +1919,14 @@ static void pktgen_mark_device(const char *ifname)
break; /* success */
mutex_unlock(&pktgen_thread_lock);
- pr_debug("pktgen: pktgen_mark_device waiting for %s "
- "to disappear....\n", ifname);
+ pr_debug("%s: waiting for %s to disappear....\n",
+ __func__, ifname);
schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
mutex_lock(&pktgen_thread_lock);
if (++i >= max_tries) {
- printk(KERN_ERR "pktgen_mark_device: timed out after "
- "waiting %d msec for device %s to be removed\n",
- msec_per_try * i, ifname);
+ pr_err("%s: timed out after waiting %d msec for device %s to be removed\n",
+ __func__, msec_per_try * i, ifname);
break;
}
@@ -1952,8 +1953,8 @@ static void pktgen_change_name(struct net_device *dev)
&pktgen_if_fops,
pkt_dev);
if (!pkt_dev->entry)
- printk(KERN_ERR "pktgen: can't move proc "
- " entry for '%s'\n", dev->name);
+ pr_err("can't move proc entry for '%s'\n",
+ dev->name);
break;
}
}
@@ -2017,15 +2018,15 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
odev = pktgen_dev_get_by_name(pkt_dev, ifname);
if (!odev) {
- printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname);
+ pr_err("no such netdevice: \"%s\"\n", ifname);
return -ENODEV;
}
if (odev->type != ARPHRD_ETHER) {
- printk(KERN_ERR "pktgen: not an ethernet device: \"%s\"\n", ifname);
+ pr_err("not an ethernet device: \"%s\"\n", ifname);
err = -EINVAL;
} else if (!netif_running(odev)) {
- printk(KERN_ERR "pktgen: device is down: \"%s\"\n", ifname);
+ pr_err("device is down: \"%s\"\n", ifname);
err = -ENETDOWN;
} else {
pkt_dev->odev = odev;
@@ -2044,8 +2045,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
int ntxq;
if (!pkt_dev->odev) {
- printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
- "setup_inject.\n");
+ pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
sprintf(pkt_dev->result,
"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
return;
@@ -2055,19 +2055,15 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
ntxq = pkt_dev->odev->real_num_tx_queues;
if (ntxq <= pkt_dev->queue_map_min) {
- printk(KERN_WARNING "pktgen: WARNING: Requested "
- "queue_map_min (zero-based) (%d) exceeds valid range "
- "[0 - %d] for (%d) queues on %s, resetting\n",
- pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odevname);
+ pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
+ pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
+ pkt_dev->odevname);
pkt_dev->queue_map_min = ntxq - 1;
}
if (pkt_dev->queue_map_max >= ntxq) {
- printk(KERN_WARNING "pktgen: WARNING: Requested "
- "queue_map_max (zero-based) (%d) exceeds valid range "
- "[0 - %d] for (%d) queues on %s, resetting\n",
- pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odevname);
+ pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
+ pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
+ pkt_dev->odevname);
pkt_dev->queue_map_max = ntxq - 1;
}
@@ -2127,8 +2123,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
}
rcu_read_unlock();
if (err)
- printk(KERN_ERR "pktgen: ERROR: IPv6 link "
- "address not availble.\n");
+ pr_err("ERROR: IPv6 link address not available\n");
}
#endif
} else {
@@ -2562,8 +2557,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
if (nhead > 0) {
ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
if (ret < 0) {
- printk(KERN_ERR "Error expanding "
- "ipsec packet %d\n", ret);
+ pr_err("Error expanding ipsec packet %d\n",
+ ret);
goto err;
}
}
@@ -2572,8 +2567,7 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
skb_pull(skb, ETH_HLEN);
ret = pktgen_output_ipsec(skb, pkt_dev);
if (ret) {
- printk(KERN_ERR "Error creating ipsec "
- "packet %d\n", ret);
+ pr_err("Error creating ipsec packet %d\n", ret);
goto err;
}
/* restore ll */
@@ -3049,8 +3043,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
if (datalen < sizeof(struct pktgen_hdr)) {
datalen = sizeof(struct pktgen_hdr);
if (net_ratelimit())
- printk(KERN_INFO "pktgen: increased datalen to %d\n",
- datalen);
+ pr_info("increased datalen to %d\n", datalen);
}
udph->source = htons(pkt_dev->cur_udp_src);
@@ -3177,7 +3170,7 @@ static void pktgen_run(struct pktgen_thread *t)
struct pktgen_dev *pkt_dev;
int started = 0;
- pr_debug("pktgen: entering pktgen_run. %p\n", t);
+ func_enter();
if_lock(t);
list_for_each_entry(pkt_dev, &t->if_list, list) {
@@ -3210,7 +3203,7 @@ static void pktgen_stop_all_threads_ifs(void)
{
struct pktgen_thread *t;
- pr_debug("pktgen: entering pktgen_stop_all_threads_ifs.\n");
+ func_enter();
mutex_lock(&pktgen_thread_lock);
@@ -3275,7 +3268,7 @@ static void pktgen_run_all_threads(void)
{
struct pktgen_thread *t;
- pr_debug("pktgen: entering pktgen_run_all_threads.\n");
+ func_enter();
mutex_lock(&pktgen_thread_lock);
@@ -3294,7 +3287,7 @@ static void pktgen_reset_all_threads(void)
{
struct pktgen_thread *t;
- pr_debug("pktgen: entering pktgen_reset_all_threads.\n");
+ func_enter();
mutex_lock(&pktgen_thread_lock);
@@ -3344,8 +3337,8 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
if (!pkt_dev->running) {
- printk(KERN_WARNING "pktgen: interface: %s is already "
- "stopped\n", pkt_dev->odevname);
+ pr_warning("interface: %s is already stopped\n",
+ pkt_dev->odevname);
return -EINVAL;
}
@@ -3381,7 +3374,7 @@ static void pktgen_stop(struct pktgen_thread *t)
{
struct pktgen_dev *pkt_dev;
- pr_debug("pktgen: entering pktgen_stop\n");
+ func_enter();
if_lock(t);
@@ -3401,7 +3394,7 @@ static void pktgen_rem_one_if(struct pktgen_thread *t)
struct list_head *q, *n;
struct pktgen_dev *cur;
- pr_debug("pktgen: entering pktgen_rem_one_if\n");
+ func_enter();
if_lock(t);
@@ -3427,9 +3420,10 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
struct list_head *q, *n;
struct pktgen_dev *cur;
+ func_enter();
+
/* Remove all devices, free mem */
- pr_debug("pktgen: entering pktgen_rem_all_ifs\n");
if_lock(t);
list_for_each_safe(q, n, &t->if_list) {
@@ -3511,8 +3505,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->skb = fill_packet(odev, pkt_dev);
if (pkt_dev->skb == NULL) {
- printk(KERN_ERR "pktgen: ERROR: couldn't "
- "allocate skb in fill_packet.\n");
+ pr_err("ERROR: couldn't allocate skb in fill_packet\n");
schedule();
pkt_dev->clone_count--; /* back out increment, OOM */
return;
@@ -3592,8 +3585,7 @@ static int pktgen_thread_worker(void *arg)
init_waitqueue_head(&t->queue);
complete(&t->start_done);
- pr_debug("pktgen: starting pktgen/%d: pid=%d\n",
- cpu, task_pid_nr(current));
+ pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));
set_current_state(TASK_INTERRUPTIBLE);
@@ -3646,13 +3638,13 @@ static int pktgen_thread_worker(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
}
- pr_debug("pktgen: %s stopping all device\n", t->tsk->comm);
+ pr_debug("%s stopping all device\n", t->tsk->comm);
pktgen_stop(t);
- pr_debug("pktgen: %s removing all device\n", t->tsk->comm);
+ pr_debug("%s removing all device\n", t->tsk->comm);
pktgen_rem_all_ifs(t);
- pr_debug("pktgen: %s removing thread.\n", t->tsk->comm);
+ pr_debug("%s removing thread\n", t->tsk->comm);
pktgen_rem_thread(t);
return 0;
@@ -3676,7 +3668,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
}
if_unlock(t);
- pr_debug("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev);
+ pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
return pkt_dev;
}
@@ -3692,8 +3684,7 @@ static int add_dev_to_thread(struct pktgen_thread *t,
if_lock(t);
if (pkt_dev->pg_thread) {
- printk(KERN_ERR "pktgen: ERROR: already assigned "
- "to a thread.\n");
+ pr_err("ERROR: already assigned to a thread\n");
rv = -EBUSY;
goto out;
}
@@ -3719,7 +3710,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev = __pktgen_NN_threads(ifname, FIND);
if (pkt_dev) {
- printk(KERN_ERR "pktgen: ERROR: interface already used.\n");
+ pr_err("ERROR: interface already used\n");
return -EBUSY;
}
@@ -3764,7 +3755,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir,
&pktgen_if_fops, pkt_dev);
if (!pkt_dev->entry) {
- printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
+ pr_err("cannot create %s/%s procfs entry\n",
PG_PROC_DIR, ifname);
err = -EINVAL;
goto out2;
@@ -3795,8 +3786,7 @@ static int __init pktgen_create_thread(int cpu)
t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
cpu_to_node(cpu));
if (!t) {
- printk(KERN_ERR "pktgen: ERROR: out of memory, can't "
- "create new thread.\n");
+ pr_err("ERROR: out of memory, can't create new thread\n");
return -ENOMEM;
}
@@ -3810,8 +3800,7 @@ static int __init pktgen_create_thread(int cpu)
p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
if (IS_ERR(p)) {
- printk(KERN_ERR "pktgen: kernel_thread() failed "
- "for cpu %d\n", t->cpu);
+ pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
list_del(&t->th_list);
kfree(t);
return PTR_ERR(p);
@@ -3822,7 +3811,7 @@ static int __init pktgen_create_thread(int cpu)
pe = proc_create_data(t->tsk->comm, 0600, pg_proc_dir,
&pktgen_thread_fops, t);
if (!pe) {
- printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
+ pr_err("cannot create %s/%s procfs entry\n",
PG_PROC_DIR, t->tsk->comm);
kthread_stop(p);
list_del(&t->th_list);
@@ -3856,11 +3845,10 @@ static int pktgen_remove_device(struct pktgen_thread *t,
struct pktgen_dev *pkt_dev)
{
- pr_debug("pktgen: remove_device pkt_dev=%p\n", pkt_dev);
+ pr_debug("remove_device pkt_dev=%p\n", pkt_dev);
if (pkt_dev->running) {
- printk(KERN_WARNING "pktgen: WARNING: trying to remove a "
- "running interface, stopping it now.\n");
+ pr_warning("WARNING: trying to remove a running interface, stopping it now\n");
pktgen_stop_device(pkt_dev);
}
@@ -3891,7 +3879,7 @@ static int __init pg_init(void)
int cpu;
struct proc_dir_entry *pe;
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net);
if (!pg_proc_dir)
@@ -3899,8 +3887,7 @@ static int __init pg_init(void)
pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
if (pe == NULL) {
- printk(KERN_ERR "pktgen: ERROR: cannot create %s "
- "procfs entry.\n", PGCTRL);
+ pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
proc_net_remove(&init_net, PG_PROC_DIR);
return -EINVAL;
}
@@ -3913,13 +3900,12 @@ static int __init pg_init(void)
err = pktgen_create_thread(cpu);
if (err)
- printk(KERN_WARNING "pktgen: WARNING: Cannot create "
- "thread for cpu %d (%d)\n", cpu, err);
+ pr_warning("WARNING: Cannot create thread for cpu %d (%d)\n",
+ cpu, err);
}
if (list_empty(&pktgen_threads)) {
- printk(KERN_ERR "pktgen: ERROR: Initialization failed for "
- "all threads\n");
+ pr_err("ERROR: Initialization failed for all threads\n");
unregister_netdevice_notifier(&pktgen_notifier_block);
remove_proc_entry(PGCTRL, pg_proc_dir);
proc_net_remove(&init_net, PG_PROC_DIR);
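The pktgen conversion above leans on pr_fmt(): because the macro is defined before the printk helpers are pulled in, every pr_err()/pr_warning()/pr_debug() call is automatically prefixed with the module name, so the hand-written "pktgen: " prefixes can go. A minimal sketch for a hypothetical module:

/* Must be defined before <linux/kernel.h>/<linux/module.h> bring in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
        pr_info("loaded\n");                    /* emits "<modname>: loaded" */
        pr_err("ERROR: no device found\n");     /* emits "<modname>: ERROR: no device found" */
        return 0;
}
module_init(demo_init);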
diff --git a/net/core/scm.c b/net/core/scm.c
index b88f6f9..681c976 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -170,6 +170,30 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
err = scm_check_creds(&p->creds);
if (err)
goto error;
+
+ if (pid_vnr(p->pid) != p->creds.pid) {
+ struct pid *pid;
+ err = -ESRCH;
+ pid = find_get_pid(p->creds.pid);
+ if (!pid)
+ goto error;
+ put_pid(p->pid);
+ p->pid = pid;
+ }
+
+ if ((p->cred->euid != p->creds.uid) ||
+ (p->cred->egid != p->creds.gid)) {
+ struct cred *cred;
+ err = -ENOMEM;
+ cred = prepare_creds();
+ if (!cred)
+ goto error;
+
+ cred->uid = cred->euid = p->creds.uid;
+ cred->gid = cred->egid = p->creds.gid;
+ put_cred(p->cred);
+ p->cred = cred;
+ }
break;
default:
goto error;
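The __scm_send() hunk above is what validates credentials passed from userspace in an SCM_CREDENTIALS control message and turns them into a struct pid and struct cred. A hedged userspace sketch of the sending side on an AF_UNIX socket (hypothetical fd, error handling trimmed):

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>

static int send_with_creds(int fd, pid_t pid, uid_t uid, gid_t gid)
{
        struct ucred creds = { .pid = pid, .uid = uid, .gid = gid };
        char payload = 'x';
        union {
                char buf[CMSG_SPACE(sizeof(struct ucred))];
                struct cmsghdr align;
        } u;
        struct iovec iov = { .iov_base = &payload, .iov_len = 1 };
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_CREDENTIALS;      /* kernel checks these against the sender */
        cmsg->cmsg_len = CMSG_LEN(sizeof(creds));
        memcpy(CMSG_DATA(cmsg), &creds, sizeof(creds));

        return sendmsg(fd, &msg, 0);
}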
diff --git a/net/core/sock.c b/net/core/sock.c
index f9ce0db..fef2434 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -110,6 +110,7 @@
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
+#include <linux/user_namespace.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -749,6 +750,20 @@ set_rcvbuf:
EXPORT_SYMBOL(sock_setsockopt);
+void cred_to_ucred(struct pid *pid, const struct cred *cred,
+ struct ucred *ucred)
+{
+ ucred->pid = pid_vnr(pid);
+ ucred->uid = ucred->gid = -1;
+ if (cred) {
+ struct user_namespace *current_ns = current_user_ns();
+
+ ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
+ ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
+ }
+}
+EXPORT_SYMBOL_GPL(cred_to_ucred);
+
int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -901,11 +916,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_PEERCRED:
- if (len > sizeof(sk->sk_peercred))
- len = sizeof(sk->sk_peercred);
- if (copy_to_user(optval, &sk->sk_peercred, len))
+ {
+ struct ucred peercred;
+ if (len > sizeof(peercred))
+ len = sizeof(peercred);
+ cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+ if (copy_to_user(optval, &peercred, len))
return -EFAULT;
goto lenout;
+ }
case SO_PEERNAME:
{
@@ -1119,6 +1138,9 @@ static void __sk_free(struct sock *sk)
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_omem_alloc));
+ if (sk->sk_peer_cred)
+ put_cred(sk->sk_peer_cred);
+ put_pid(sk->sk_peer_pid);
put_net(sock_net(sk));
sk_prot_free(sk->sk_prot_creator, sk);
}
@@ -1954,9 +1976,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- sk->sk_peercred.pid = 0;
- sk->sk_peercred.uid = -1;
- sk->sk_peercred.gid = -1;
+ sk->sk_peer_pid = NULL;
+ sk->sk_peer_cred = NULL;
sk->sk_write_pending = 0;
sk->sk_rcvlowat = 1;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
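On the receive side, the SO_PEERCRED branch above now builds the struct ucred on demand from sk_peer_pid and sk_peer_cred instead of a cached copy. A userspace sketch of querying it on a connected AF_UNIX socket (hypothetical fd):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>

static void show_peer_creds(int fd)
{
        struct ucred peer;
        socklen_t len = sizeof(peer);

        if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
                printf("peer pid=%d uid=%d gid=%d\n",
                       (int)peer.pid, (int)peer.uid, (int)peer.gid);
}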
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 01e4d39..2abddee 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -82,7 +82,7 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
elapsed_time = delta / 10;
if (elapsed_time != 0 &&
- dccp_insert_option_elapsed_time(sk, skb, elapsed_time))
+ dccp_insert_option_elapsed_time(skb, elapsed_time))
return -1;
avr = dccp_ackvec_record_new();
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index d323589..95f7529 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -715,9 +715,9 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
x_recv = htonl(hc->rx_x_recv);
pinv = htonl(hc->rx_pinv);
- if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
+ if (dccp_insert_option(skb, TFRC_OPT_LOSS_EVENT_RATE,
&pinv, sizeof(pinv)) ||
- dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
+ dccp_insert_option(skb, TFRC_OPT_RECEIVE_RATE,
&x_recv, sizeof(x_recv)))
return -1;
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a10a61a..3ccef1b 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -446,16 +446,12 @@ extern void dccp_feat_list_purge(struct list_head *fn_list);
extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
-extern int dccp_insert_option_elapsed_time(struct sock *sk,
- struct sk_buff *skb,
- u32 elapsed_time);
+extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
extern u32 dccp_timestamp(void);
extern void dccp_timestamping_init(void);
-extern int dccp_insert_option_timestamp(struct sock *sk,
- struct sk_buff *skb);
-extern int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
- unsigned char option,
- const void *value, unsigned char len);
+extern int dccp_insert_option_timestamp(struct sk_buff *skb);
+extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
+ const void *value, unsigned char len);
#ifdef CONFIG_SYSCTL
extern int dccp_sysctl_init(void);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 6beb6a7..10c957a 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -430,7 +430,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
if (dccp_parse_options(sk, NULL, skb))
return 1;
- /* Obtain usec RTT sample from SYN exchange (used by CCID 3) */
+ /* Obtain usec RTT sample from SYN exchange (used by TFRC). */
if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
dp->dccps_options_received.dccpor_timestamp_echo));
@@ -535,6 +535,8 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
const struct dccp_hdr *dh,
const unsigned len)
{
+ struct dccp_sock *dp = dccp_sk(sk);
+ u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
int queued = 0;
switch (dh->dccph_type) {
@@ -559,7 +561,14 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
if (sk->sk_state == DCCP_PARTOPEN)
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
- dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
+ /* Obtain usec RTT sample from SYN exchange (used by TFRC). */
+ if (likely(sample)) {
+ long delta = dccp_timestamp() - sample;
+
+ dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
+ }
+
+ dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
dccp_set_state(sk, DCCP_OPEN);
if (dh->dccph_type == DCCP_PKT_DATAACK ||
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 07395f8..bfda087 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -299,9 +299,8 @@ static inline u8 dccp_ndp_len(const u64 ndp)
return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
}
-int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
- const unsigned char option,
- const void *value, const unsigned char len)
+int dccp_insert_option(struct sk_buff *skb, const unsigned char option,
+ const void *value, const unsigned char len)
{
unsigned char *to;
@@ -354,8 +353,7 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
}
-int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb,
- u32 elapsed_time)
+int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
{
const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
const int len = 2 + elapsed_time_len;
@@ -386,13 +384,13 @@ int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb,
EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time);
-int dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb)
+int dccp_insert_option_timestamp(struct sk_buff *skb)
{
__be32 now = htonl(dccp_timestamp());
/* yes this will overflow but that is the point as we want a
* 10 usec 32 bit timer which mean it wraps every 11.9 hours */
- return dccp_insert_option(sk, skb, DCCPO_TIMESTAMP, &now, sizeof(now));
+ return dccp_insert_option(skb, DCCPO_TIMESTAMP, &now, sizeof(now));
}
EXPORT_SYMBOL_GPL(dccp_insert_option_timestamp);
@@ -531,9 +529,9 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) {
/*
* Obtain RTT sample from Request/Response exchange.
- * This is currently used in CCID 3 initialisation.
+ * This is currently used for TFRC initialisation.
*/
- if (dccp_insert_option_timestamp(sk, skb))
+ if (dccp_insert_option_timestamp(skb))
return -1;
} else if (dp->dccps_hc_rx_ackvec != NULL &&
@@ -564,6 +562,10 @@ int dccp_insert_options_rsk(struct dccp_request_sock *dreq, struct sk_buff *skb)
if (dccp_feat_insert_opts(NULL, dreq, skb))
return -1;
+ /* Obtain RTT sample from Response/Ack exchange (used by TFRC). */
+ if (dccp_insert_option_timestamp(skb))
+ return -1;
+
if (dreq->dreq_timestamp_echo != 0 &&
dccp_insert_option_timestamp_echo(NULL, dreq, skb))
return -1;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index f79bcef..096250d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1002,7 +1002,8 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
static inline int dccp_mib_init(void)
{
return snmp_mib_init((void __percpu **)dccp_statistics,
- sizeof(struct dccp_mib));
+ sizeof(struct dccp_mib),
+ __alignof__(struct dccp_mib));
}
static inline void dccp_mib_exit(void)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d99e7e0..3ceb025 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -355,6 +355,8 @@ lookup_protocol:
inet = inet_sk(sk);
inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
+ inet->nodefrag = 0;
+
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
@@ -1425,13 +1427,49 @@ unsigned long snmp_fold_field(void __percpu *mib[], int offt)
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
-int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
+#if BITS_PER_LONG==32
+
+u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
+{
+ u64 res = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ void *bhptr, *userptr;
+ struct u64_stats_sync *syncp;
+ u64 v_bh, v_user;
+ unsigned int start;
+
+ /* first mib used by softirq context, we must use _bh() accessors */
+ bhptr = per_cpu_ptr(SNMP_STAT_BHPTR(mib), cpu);
+ syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
+ do {
+ start = u64_stats_fetch_begin_bh(syncp);
+ v_bh = *(((u64 *) bhptr) + offt);
+ } while (u64_stats_fetch_retry_bh(syncp, start));
+
+ /* second mib used in USER context */
+ userptr = per_cpu_ptr(SNMP_STAT_USRPTR(mib), cpu);
+ syncp = (struct u64_stats_sync *)(userptr + syncp_offset);
+ do {
+ start = u64_stats_fetch_begin(syncp);
+ v_user = *(((u64 *) userptr) + offt);
+ } while (u64_stats_fetch_retry(syncp, start));
+
+ res += v_bh + v_user;
+ }
+ return res;
+}
+EXPORT_SYMBOL_GPL(snmp_fold_field64);
+#endif
+
+int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
{
BUG_ON(ptr == NULL);
- ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
+ ptr[0] = __alloc_percpu(mibsize, align);
if (!ptr[0])
goto err0;
- ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
+ ptr[1] = __alloc_percpu(mibsize, align);
if (!ptr[1])
goto err1;
return 0;
@@ -1488,25 +1526,32 @@ static const struct net_protocol icmp_protocol = {
static __net_init int ipv4_mib_init_net(struct net *net)
{
if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
- sizeof(struct tcp_mib)) < 0)
+ sizeof(struct tcp_mib),
+ __alignof__(struct tcp_mib)) < 0)
goto err_tcp_mib;
if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
- sizeof(struct ipstats_mib)) < 0)
+ sizeof(struct ipstats_mib),
+ __alignof__(struct ipstats_mib)) < 0)
goto err_ip_mib;
if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
- sizeof(struct linux_mib)) < 0)
+ sizeof(struct linux_mib),
+ __alignof__(struct linux_mib)) < 0)
goto err_net_mib;
if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
- sizeof(struct udp_mib)) < 0)
+ sizeof(struct udp_mib),
+ __alignof__(struct udp_mib)) < 0)
goto err_udp_mib;
if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
- sizeof(struct udp_mib)) < 0)
+ sizeof(struct udp_mib),
+ __alignof__(struct udp_mib)) < 0)
goto err_udplite_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
- sizeof(struct icmp_mib)) < 0)
+ sizeof(struct icmp_mib),
+ __alignof__(struct icmp_mib)) < 0)
goto err_icmp_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
- sizeof(struct icmpmsg_mib)) < 0)
+ sizeof(struct icmpmsg_mib),
+ __alignof__(struct icmpmsg_mib)) < 0)
goto err_icmpmsg_mib;
tcp_mib_init(net);
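snmp_fold_field64() above only works because the per-CPU updaters bracket their 64-bit counter writes with a u64_stats_sync sequence, which is what makes the fetch_begin/fetch_retry loops safe on 32-bit. A minimal sketch of the writer side, assuming a hypothetical stats structure laid out like the MIBs (counters first, syncp after):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_mib {
        u64 packets;                    /* hypothetical 64-bit counter */
        struct u64_stats_sync syncp;
};

static void demo_count_packet(struct demo_mib *mib)
{
        u64_stats_update_begin(&mib->syncp);
        mib->packets++;                 /* readers retry if they race with this update */
        u64_stats_update_end(&mib->syncp);
}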
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index cf78f41..09ead1b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -333,11 +333,14 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
struct net_device *dev = neigh->dev;
__be32 target = *(__be32*)neigh->primary_key;
int probes = atomic_read(&neigh->probes);
- struct in_device *in_dev = in_dev_get(dev);
+ struct in_device *in_dev;
- if (!in_dev)
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev) {
+ rcu_read_unlock();
return;
-
+ }
switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
default:
case 0: /* By default announce any local IP */
@@ -358,9 +361,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
case 2: /* Avoid secondary IPs, get a primary/preferred one */
break;
}
+ rcu_read_unlock();
- if (in_dev)
- in_dev_put(in_dev);
if (!saddr)
saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
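The arp_solicit() change swaps the refcounted in_dev_get()/in_dev_put() pair for a pure RCU read-side lookup, avoiding two atomic operations in the fast path. The general shape of that pattern, as a hypothetical helper:

#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

static void demo_inspect(struct net_device *dev)
{
        struct in_device *in_dev;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
        if (in_dev) {
                /* in_dev is only guaranteed valid inside this RCU section;
                 * no reference count is taken. */
        }
        rcu_read_unlock();
}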
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 035673f..9ffa24b 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -51,8 +51,8 @@
* lookups performed with disabled BHs.
*
* Serialisation issues.
- * 1. Nodes may appear in the tree only with the pool write lock held.
- * 2. Nodes may disappear from the tree only with the pool write lock held
+ * 1. Nodes may appear in the tree only with the pool lock held.
+ * 2. Nodes may disappear from the tree only with the pool lock held
* AND reference count being 0.
* 3. Nodes appears and disappears from unused node list only under
* "inet_peer_unused_lock".
@@ -64,7 +64,7 @@
* usually under some other lock to prevent node disappearing
* dtime: unused node list lock
* v4daddr: unchangeable
- * ip_id_count: idlock
+ * ip_id_count: atomic value (no lock needed)
*/
static struct kmem_cache *peer_cachep __read_mostly;
@@ -80,11 +80,11 @@ static const struct inet_peer peer_fake_node = {
static struct {
struct inet_peer *root;
- rwlock_t lock;
+ spinlock_t lock;
int total;
} peers = {
.root = peer_avl_empty,
- .lock = __RW_LOCK_UNLOCKED(peers.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(peers.lock),
.total = 0,
};
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
@@ -129,7 +129,7 @@ void __init inet_initpeers(void)
peer_cachep = kmem_cache_create("inet_peer_cache",
sizeof(struct inet_peer),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
/* All the timers, started at system startup tend
@@ -153,16 +153,13 @@ static void unlink_from_unused(struct inet_peer *p)
/*
* Called with local BH disabled and the pool lock held.
- * _stack is known to be NULL or not at compile time,
- * so compiler will optimize the if (_stack) tests.
*/
#define lookup(_daddr, _stack) \
({ \
struct inet_peer *u, **v; \
- if (_stack != NULL) { \
- stackptr = _stack; \
- *stackptr++ = &peers.root; \
- } \
+ \
+ stackptr = _stack; \
+ *stackptr++ = &peers.root; \
for (u = peers.root; u != peer_avl_empty; ) { \
if (_daddr == u->v4daddr) \
break; \
@@ -170,14 +167,46 @@ static void unlink_from_unused(struct inet_peer *p)
v = &u->avl_left; \
else \
v = &u->avl_right; \
- if (_stack != NULL) \
- *stackptr++ = v; \
+ *stackptr++ = v; \
u = *v; \
} \
u; \
})
-/* Called with local BH disabled and the pool write lock held. */
+/*
+ * Called with rcu_read_lock_bh()
+ * Because we hold no lock against a writer, it's quite possible we fall
+ * into an endless loop.
+ * But every pointer we follow is guaranteed to be valid thanks to RCU.
+ * We exit from this function if number of links exceeds PEER_MAXDEPTH
+ */
+static struct inet_peer *lookup_rcu_bh(__be32 daddr)
+{
+ struct inet_peer *u = rcu_dereference_bh(peers.root);
+ int count = 0;
+
+ while (u != peer_avl_empty) {
+ if (daddr == u->v4daddr) {
+ /* Before taking a reference, check if this entry was
+ * deleted, unlink_from_pool() sets refcnt=-1 to make
+ * distinction between an unused entry (refcnt=0) and
+ * a freed one.
+ */
+ if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+ u = NULL;
+ return u;
+ }
+ if ((__force __u32)daddr < (__force __u32)u->v4daddr)
+ u = rcu_dereference_bh(u->avl_left);
+ else
+ u = rcu_dereference_bh(u->avl_right);
+ if (unlikely(++count == PEER_MAXDEPTH))
+ break;
+ }
+ return NULL;
+}
+
+/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start) \
({ \
struct inet_peer *u, **v; \
@@ -191,9 +220,10 @@ static void unlink_from_unused(struct inet_peer *p)
u; \
})
-/* Called with local BH disabled and the pool write lock held.
+/* Called with local BH disabled and the pool lock held.
* Variable names are the proof of operation correctness.
- * Look into mm/map_avl.c for more detail description of the ideas. */
+ * Look into mm/map_avl.c for a more detailed description of the ideas.
+ */
static void peer_avl_rebalance(struct inet_peer **stack[],
struct inet_peer ***stackend)
{
@@ -269,16 +299,22 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
}
}
-/* Called with local BH disabled and the pool write lock held. */
+/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n) \
do { \
n->avl_height = 1; \
n->avl_left = peer_avl_empty; \
n->avl_right = peer_avl_empty; \
+ smp_wmb(); /* lockless readers can catch us now */ \
**--stackptr = n; \
peer_avl_rebalance(stack, stackptr); \
} while (0)
+static void inetpeer_free_rcu(struct rcu_head *head)
+{
+ kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
+}
+
/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
@@ -286,13 +322,14 @@ static void unlink_from_pool(struct inet_peer *p)
do_free = 0;
- write_lock_bh(&peers.lock);
+ spin_lock_bh(&peers.lock);
/* Check the reference counter. It was artificially incremented by 1
- * in cleanup() function to prevent sudden disappearing. If the
- * reference count is still 1 then the node is referenced only as `p'
- * here and from the pool. So under the exclusive pool lock it's safe
- * to remove the node and free it later. */
- if (atomic_read(&p->refcnt) == 1) {
+ * in cleanup() function to prevent sudden disappearing. If we can
+ * atomically (because of lockless readers) take this last reference,
+ * it's safe to remove the node and free it later.
+ * We use refcnt=-1 to alert lockless readers this entry is deleted.
+ */
+ if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
struct inet_peer **stack[PEER_MAXDEPTH];
struct inet_peer ***stackptr, ***delp;
if (lookup(p->v4daddr, stack) != p)
@@ -321,17 +358,18 @@ static void unlink_from_pool(struct inet_peer *p)
peers.total--;
do_free = 1;
}
- write_unlock_bh(&peers.lock);
+ spin_unlock_bh(&peers.lock);
if (do_free)
- kmem_cache_free(peer_cachep, p);
+ call_rcu_bh(&p->rcu, inetpeer_free_rcu);
else
/* The node is used again. Decrease the reference counter
* back. The loop "cleanup -> unlink_from_unused
* -> unlink_from_pool -> putpeer -> link_to_unused
* -> cleanup (for the same node)"
* doesn't really exist because the entry will have a
- * recent deletion time and will not be cleaned again soon. */
+ * recent deletion time and will not be cleaned again soon.
+ */
inet_putpeer(p);
}
@@ -375,62 +413,56 @@ static int cleanup_once(unsigned long ttl)
/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
- struct inet_peer *p, *n;
+ struct inet_peer *p;
struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
- /* Look up for the address quickly. */
- read_lock_bh(&peers.lock);
- p = lookup(daddr, NULL);
- if (p != peer_avl_empty)
- atomic_inc(&p->refcnt);
- read_unlock_bh(&peers.lock);
+ /* Look up for the address quickly, lockless.
+ * Because of a concurrent writer, we might not find an existing entry.
+ */
+ rcu_read_lock_bh();
+ p = lookup_rcu_bh(daddr);
+ rcu_read_unlock_bh();
+
+ if (p) {
+ /* The existing node has been found.
+ * Remove the entry from unused list if it was there.
+ */
+ unlink_from_unused(p);
+ return p;
+ }
+ /* retry an exact lookup, taking the lock before.
+ * At least, nodes should be hot in our cache.
+ */
+ spin_lock_bh(&peers.lock);
+ p = lookup(daddr, stack);
if (p != peer_avl_empty) {
- /* The existing node has been found. */
+ atomic_inc(&p->refcnt);
+ spin_unlock_bh(&peers.lock);
/* Remove the entry from unused list if it was there. */
unlink_from_unused(p);
return p;
}
-
- if (!create)
- return NULL;
-
- /* Allocate the space outside the locked region. */
- n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
- if (n == NULL)
- return NULL;
- n->v4daddr = daddr;
- atomic_set(&n->refcnt, 1);
- atomic_set(&n->rid, 0);
- atomic_set(&n->ip_id_count, secure_ip_id(daddr));
- n->tcp_ts_stamp = 0;
-
- write_lock_bh(&peers.lock);
- /* Check if an entry has suddenly appeared. */
- p = lookup(daddr, stack);
- if (p != peer_avl_empty)
- goto out_free;
-
- /* Link the node. */
- link_to_pool(n);
- INIT_LIST_HEAD(&n->unused);
- peers.total++;
- write_unlock_bh(&peers.lock);
+ p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
+ if (p) {
+ p->v4daddr = daddr;
+ atomic_set(&p->refcnt, 1);
+ atomic_set(&p->rid, 0);
+ atomic_set(&p->ip_id_count, secure_ip_id(daddr));
+ p->tcp_ts_stamp = 0;
+ INIT_LIST_HEAD(&p->unused);
+
+
+ /* Link the node. */
+ link_to_pool(p);
+ peers.total++;
+ }
+ spin_unlock_bh(&peers.lock);
if (peers.total >= inet_peer_threshold)
/* Remove one less-recently-used entry. */
cleanup_once(0);
- return n;
-
-out_free:
- /* The appropriate node is already in the pool. */
- atomic_inc(&p->refcnt);
- write_unlock_bh(&peers.lock);
- /* Remove the entry from unused list if it was there. */
- unlink_from_unused(p);
- /* Free preallocated the preallocated node. */
- kmem_cache_free(peer_cachep, n);
return p;
}
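The lockless lookup above hinges on a small refcounting idiom: the deleter flips refcnt from 1 to -1 with atomic_cmpxchg(), and readers take references with atomic_add_unless(..., 1, -1), so an entry already headed for call_rcu_bh() can never be resurrected. A standalone sketch of just that idiom (hypothetical object, not inet_peer):

#include <linux/atomic.h>
#include <linux/types.h>

struct demo_obj {
        atomic_t refcnt;
};

/* Reader side: take a reference unless the object is already dead (-1). */
static bool demo_tryget(struct demo_obj *obj)
{
        return atomic_add_unless(&obj->refcnt, 1, -1);
}

/* Deleter side: only the holder of the last reference (count == 1) may mark
 * the object dead; after this succeeds demo_tryget() always fails and the
 * memory can safely be handed to RCU for freeing. */
static bool demo_kill(struct demo_obj *obj)
{
        return atomic_cmpxchg(&obj->refcnt, 1, -1) == 1;
}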
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 75347ea..dd0dbf0 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -124,11 +124,8 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a)
}
/* Memory Tracking Functions. */
-static __inline__ void frag_kfree_skb(struct netns_frags *nf,
- struct sk_buff *skb, int *work)
+static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
- if (work)
- *work -= skb->truesize;
atomic_sub(skb->truesize, &nf->mem);
kfree_skb(skb);
}
@@ -309,7 +306,7 @@ static int ip_frag_reinit(struct ipq *qp)
fp = qp->q.fragments;
do {
struct sk_buff *xp = fp->next;
- frag_kfree_skb(qp->q.net, fp, NULL);
+ frag_kfree_skb(qp->q.net, fp);
fp = xp;
} while (fp);
@@ -317,6 +314,7 @@ static int ip_frag_reinit(struct ipq *qp)
qp->q.len = 0;
qp->q.meat = 0;
qp->q.fragments = NULL;
+ qp->q.fragments_tail = NULL;
qp->iif = 0;
return 0;
@@ -389,6 +387,11 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
* in the chain of fragments so far. We must know where to put
* this fragment, right?
*/
+ prev = qp->q.fragments_tail;
+ if (!prev || FRAG_CB(prev)->offset < offset) {
+ next = NULL;
+ goto found;
+ }
prev = NULL;
for (next = qp->q.fragments; next != NULL; next = next->next) {
if (FRAG_CB(next)->offset >= offset)
@@ -396,6 +399,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
prev = next;
}
+found:
/* We found where to put this one. Check for overlap with
* preceding fragment, and, if needed, align things so that
* any overlaps are eliminated.
@@ -446,7 +450,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
qp->q.fragments = next;
qp->q.meat -= free_it->len;
- frag_kfree_skb(qp->q.net, free_it, NULL);
+ frag_kfree_skb(qp->q.net, free_it);
}
}
@@ -454,6 +458,8 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
/* Insert this fragment in the chain of fragments. */
skb->next = next;
+ if (!next)
+ qp->q.fragments_tail = skb;
if (prev)
prev->next = skb;
else
@@ -507,6 +513,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
goto out_nomem;
fp->next = head->next;
+ if (!fp->next)
+ qp->q.fragments_tail = fp;
prev->next = fp;
skb_morph(head, qp->q.fragments);
@@ -556,7 +564,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- atomic_sub(head->truesize, &qp->q.net->mem);
for (fp=head->next; fp; fp = fp->next) {
head->data_len += fp->len;
@@ -566,8 +573,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
- atomic_sub(fp->truesize, &qp->q.net->mem);
}
+ atomic_sub(head->truesize, &qp->q.net->mem);
head->next = NULL;
head->dev = dev;
@@ -578,6 +585,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
iph->tot_len = htons(len);
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
+ qp->q.fragments_tail = NULL;
return 0;
out_nomem:
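The ip_frag_queue() change keeps a fragments_tail pointer so the common in-order case appends in O(1) instead of walking the whole fragment list. The same idea on a generic singly linked queue, as a hypothetical sketch:

#include <stddef.h>

struct demo_frag {
        int offset;
        struct demo_frag *next;
};

struct demo_queue {
        struct demo_frag *head;
        struct demo_frag *tail;
};

static void demo_insert(struct demo_queue *q, struct demo_frag *frag)
{
        struct demo_frag *prev = q->tail, *next;

        /* Fast path: in-order arrival appends after the cached tail. */
        if (!prev || prev->offset < frag->offset) {
                next = NULL;
                goto found;
        }

        /* Slow path: out-of-order arrival still walks the list. */
        prev = NULL;
        for (next = q->head; next; next = next->next) {
                if (next->offset >= frag->offset)
                        break;
                prev = next;
        }
found:
        frag->next = next;
        if (!next)
                q->tail = frag;
        if (prev)
                prev->next = frag;
        else
                q->head = frag;
}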
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index db47a5a..d859bcc 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -342,7 +342,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
#ifdef CONFIG_NET_CLS_ROUTE
if (unlikely(skb_dst(skb)->tclassid)) {
- struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id());
+ struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
u32 idx = skb_dst(skb)->tclassid;
st[idx&0xFF].o_packets++;
st[idx&0xFF].o_bytes += skb->len;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6cbeb2e..7d1f4b4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -873,8 +873,10 @@ int ip_append_data(struct sock *sk,
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
+ skb = skb_peek_tail(&sk->sk_write_queue);
+
inet->cork.length += length;
- if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
+ if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
@@ -892,7 +894,7 @@ int ip_append_data(struct sock *sk,
* adding appropriate IP header.
*/
- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+ if (!skb)
goto alloc_new_skb;
while (length > 0) {
@@ -1121,7 +1123,8 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
return -EINVAL;
inet->cork.length += size;
- if ((sk->sk_protocol == IPPROTO_UDP) &&
+ if ((size + skb->len > mtu) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 47fff52..6c40a8c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -465,7 +465,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
(1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
- (1<<IP_MINTTL))) ||
+ (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
@@ -588,6 +588,13 @@ static int do_ip_setsockopt(struct sock *sk, int level,
}
inet->hdrincl = val ? 1 : 0;
break;
+ case IP_NODEFRAG:
+ if (sk->sk_type != SOCK_RAW) {
+ err = -ENOPROTOOPT;
+ break;
+ }
+ inet->nodefrag = val ? 1 : 0;
+ break;
case IP_MTU_DISCOVER:
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
goto e_inval;
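IP_NODEFRAG, added above, is only accepted on SOCK_RAW sockets. A hedged userspace sketch of enabling it; the fallback define mirrors the value in the kernel's linux/in.h, and older libc headers may not carry it:

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_NODEFRAG
#define IP_NODEFRAG 22          /* assumed to match the kernel's linux/in.h */
#endif

static int demo_disable_defrag(int raw_fd)
{
        int one = 1;

        /* Fails with ENOPROTOOPT unless raw_fd is a SOCK_RAW socket,
         * exactly as the setsockopt hunk above enforces. */
        return setsockopt(raw_fd, IPPROTO_IP, IP_NODEFRAG, &one, sizeof(one));
}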
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index cb763ae..eab8de3 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,6 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+ struct inet_sock *inet = inet_sk(skb->sk);
+
+ if (inet && inet->nodefrag)
+ return NF_ACCEPT;
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
/* Previously seen (loopback)? Ignore. Do this before
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index e320ca6..4ae1f20 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -343,10 +343,12 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
sysctl_ip_default_ttl);
+ BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
- seq_printf(seq, " %lu",
- snmp_fold_field((void __percpu **)net->mib.ip_statistics,
- snmp4_ipstats_list[i].entry));
+ seq_printf(seq, " %llu",
+ snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+ snmp4_ipstats_list[i].entry,
+ offsetof(struct ipstats_mib, syncp)));
icmp_put(seq); /* RFC 2011 compatibility */
icmpmsg_put(seq);
@@ -432,9 +434,10 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nIpExt:");
for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
- seq_printf(seq, " %lu",
- snmp_fold_field((void __percpu **)net->mib.ip_statistics,
- snmp4_ipextstats_list[i].entry));
+ seq_printf(seq, " %llu",
+ snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+ snmp4_ipextstats_list[i].entry,
+ offsetof(struct ipstats_mib, syncp)));
seq_putc(seq, '\n');
return 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a291edb..03430de 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2881,6 +2881,7 @@ static int rt_fill_info(struct net *net,
error = rt->dst.error;
expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
if (rt->peer) {
+ inet_peer_refcheck(rt->peer);
id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
if (rt->peer->tcp_ts_stamp) {
ts = rt->peer->tcp_ts;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 02bef6a..650cace 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -18,8 +18,8 @@
#include <net/tcp.h>
#include <net/route.h>
-/* Timestamps: lowest 9 bits store TCP options */
-#define TSBITS 9
+/* Timestamps: lowest bits store TCP options */
+#define TSBITS 6
#define TSMASK (((__u32)1 << TSBITS) - 1)
extern int sysctl_tcp_syncookies;
@@ -58,7 +58,7 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
/*
* when syncookies are in effect and tcp timestamps are enabled we encode
- * tcp options in the lowest 9 bits of the timestamp value that will be
+ * tcp options in the lower bits of the timestamp value that will be
* sent in the syn-ack.
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
@@ -70,11 +70,10 @@ __u32 cookie_init_timestamp(struct request_sock *req)
u32 options = 0;
ireq = inet_rsk(req);
- if (ireq->wscale_ok) {
- options = ireq->snd_wscale;
- options |= ireq->rcv_wscale << 4;
- }
- options |= ireq->sack_ok << 8;
+
+ options = ireq->wscale_ok ? ireq->snd_wscale : 0xf;
+ options |= ireq->sack_ok << 4;
+ options |= ireq->ecn_ok << 5;
ts = ts_now & ~TSMASK;
ts |= options;
@@ -227,26 +226,38 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
* additional tcp options in the timestamp.
* This extracts these options from the timestamp echo.
*
- * The lowest 4 bits are for snd_wscale
- * The next 4 lsb are for rcv_wscale
- * The next lsb is for sack_ok
+ * The lowest 4 bits store snd_wscale.
+ * next 2 bits indicate SACK and ECN support.
+ *
+ * Return false if we decode an option that should not be there.
*/
-void cookie_check_timestamp(struct tcp_options_received *tcp_opt)
+bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
{
- /* echoed timestamp, 9 lowest bits contain options */
+ /* echoed timestamp, lowest bits contain options */
u32 options = tcp_opt->rcv_tsecr & TSMASK;
- tcp_opt->snd_wscale = options & 0xf;
- options >>= 4;
- tcp_opt->rcv_wscale = options & 0xf;
+ if (!tcp_opt->saw_tstamp) {
+ tcp_clear_options(tcp_opt);
+ return true;
+ }
+
+ if (!sysctl_tcp_timestamps)
+ return false;
tcp_opt->sack_ok = (options >> 4) & 0x1;
+ *ecn_ok = (options >> 5) & 1;
+ if (*ecn_ok && !sysctl_tcp_ecn)
+ return false;
+
+ if (tcp_opt->sack_ok && !sysctl_tcp_sack)
+ return false;
- if (tcp_opt->sack_ok)
- tcp_sack_reset(tcp_opt);
+ if ((options & 0xf) == 0xf)
+ return true; /* no window scaling */
- if (tcp_opt->snd_wscale || tcp_opt->rcv_wscale)
- tcp_opt->wscale_ok = 1;
+ tcp_opt->wscale_ok = 1;
+ tcp_opt->snd_wscale = options & 0xf;
+ return sysctl_tcp_window_scaling != 0;
}
EXPORT_SYMBOL(cookie_check_timestamp);
@@ -265,6 +276,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
int mss;
struct rtable *rt;
__u8 rcv_wscale;
+ bool ecn_ok;
if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
@@ -281,8 +293,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
+ if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
+ goto out;
ret = NULL;
req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
@@ -298,9 +310,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
ireq->rmt_port = th->source;
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
- ireq->ecn_ok = 0;
+ ireq->ecn_ok = ecn_ok;
ireq->snd_wscale = tcp_opt.snd_wscale;
- ireq->rcv_wscale = tcp_opt.rcv_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
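With TSBITS now 6, the cookie timestamp carries snd_wscale in bits 0-3 (0xf meaning "no window scaling"), SACK support in bit 4 and ECN support in bit 5. A small standalone sketch of that packing and unpacking, mirroring cookie_init_timestamp()/cookie_check_timestamp() above but not taken from the kernel:

#include <stdbool.h>
#include <stdint.h>

#define TSBITS  6
#define TSMASK  (((uint32_t)1 << TSBITS) - 1)

/* Pack the negotiated options into the low bits of the timestamp. */
static uint32_t pack_opts(bool wscale_ok, unsigned snd_wscale, bool sack_ok, bool ecn_ok)
{
        uint32_t options = wscale_ok ? (snd_wscale & 0xf) : 0xf;

        options |= (uint32_t)sack_ok << 4;
        options |= (uint32_t)ecn_ok << 5;
        return options;
}

/* Recover them from the echoed timestamp. */
static void unpack_opts(uint32_t tsecr, bool *sack_ok, bool *ecn_ok,
                        bool *wscale_ok, unsigned *snd_wscale)
{
        uint32_t options = tsecr & TSMASK;

        *sack_ok = (options >> 4) & 1;
        *ecn_ok = (options >> 5) & 1;
        *wscale_ok = (options & 0xf) != 0xf;
        *snd_wscale = *wscale_ok ? (options & 0xf) : 0;
}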
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 49d0d2b..4e6ddfb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -511,7 +511,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
}
@@ -527,7 +527,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
skb->csum = 0;
tcb->seq = tcb->end_seq = tp->write_seq;
- tcb->flags = TCPCB_FLAG_ACK;
+ tcb->flags = TCPHDR_ACK;
tcb->sacked = 0;
skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
@@ -815,7 +815,7 @@ new_segment:
skb_shinfo(skb)->gso_segs = 0;
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
copied += copy;
poffset += copy;
@@ -1061,7 +1061,7 @@ new_segment:
}
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
@@ -1898,6 +1898,10 @@ void tcp_close(struct sock *sk, long timeout)
sk_mem_reclaim(sk);
+ /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
+ if (sk->sk_state == TCP_CLOSE)
+ goto adjudge_to_death;
+
/* As outlined in RFC 2525, section 2.17, we send a RST here because
* data was lost. To witness the awful effects of the old behavior of
* always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
@@ -2958,7 +2962,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
spin_unlock(&tcp_md5sig_pool_lock);
if (p)
- return *per_cpu_ptr(p, smp_processor_id());
+ return *this_cpu_ptr(p);
local_bh_enable();
return NULL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 548d575..0433466 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3286,7 +3286,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
* connection startup slow start one packet too
* quickly. This is severely frowned upon behavior.
*/
- if (!(scb->flags & TCPCB_FLAG_SYN)) {
+ if (!(scb->flags & TCPHDR_SYN)) {
flag |= FLAG_DATA_ACKED;
} else {
flag |= FLAG_SYN_ACKED;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7f9515c..8fa32f5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -204,10 +204,12 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
* TIME-WAIT * and initialize rx_opt.ts_recent from it,
* when trying new connection.
*/
- if (peer != NULL &&
- (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
- tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
- tp->rx_opt.ts_recent = peer->tcp_ts;
+ if (peer) {
+ inet_peer_refcheck(peer);
+ if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
+ tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
+ tp->rx_opt.ts_recent = peer->tcp_ts;
+ }
}
}
@@ -1326,14 +1328,12 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
- if (!want_cookie)
+ if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, tcp_hdr(skb));
if (want_cookie) {
-#ifdef CONFIG_SYN_COOKIES
- req->cookie_ts = tmp_opt.tstamp_ok;
-#endif
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
+ req->cookie_ts = tmp_opt.tstamp_ok;
} else if (!isn) {
struct inet_peer *peer = NULL;
@@ -1351,6 +1351,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
(dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) {
+ inet_peer_refcheck(peer);
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4ed957..25ff62e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -294,9 +294,9 @@ static u16 tcp_select_window(struct sock *sk)
/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
}
/* Packet ECN state for a SYN. */
@@ -306,7 +306,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
tp->ecn_flags = 0;
if (sysctl_tcp_ecn == 1) {
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
}
}
@@ -361,7 +361,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
skb_shinfo(skb)->gso_type = 0;
TCP_SKB_CB(skb)->seq = seq;
- if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
+ if (flags & (TCPHDR_SYN | TCPHDR_FIN))
seq++;
TCP_SKB_CB(skb)->end_seq = seq;
}
@@ -820,7 +820,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
tcb = TCP_SKB_CB(skb);
memset(&opts, 0, sizeof(opts));
- if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
+ if (unlikely(tcb->flags & TCPHDR_SYN))
tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
else
tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -843,7 +843,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
tcb->flags);
- if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
+ if (unlikely(tcb->flags & TCPHDR_SYN)) {
/* RFC1323: The window in SYN & SYN/ACK segments
* is never scaled.
*/
@@ -866,7 +866,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
}
tcp_options_write((__be32 *)(th + 1), tp, &opts);
- if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
+ if (likely((tcb->flags & TCPHDR_SYN) == 0))
TCP_ECN_send(sk, skb, tcp_header_size);
#ifdef CONFIG_TCP_MD5SIG
@@ -880,7 +880,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
icsk->icsk_af_ops->send_check(sk, skb);
- if (likely(tcb->flags & TCPCB_FLAG_ACK))
+ if (likely(tcb->flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
if (skb->len != tcp_header_size)
@@ -1023,7 +1023,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
/* PSH and FIN should only be set in the second packet. */
flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+ TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
TCP_SKB_CB(buff)->flags = flags;
TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
@@ -1328,8 +1328,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
u32 in_flight, cwnd;
/* Don't be strict about the congestion window for the final FIN. */
- if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
- tcp_skb_pcount(skb) == 1)
+ if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
return 1;
in_flight = tcp_packets_in_flight(tp);
@@ -1398,7 +1397,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
* Nagle can be ignored during F-RTO too (see RFC4138).
*/
if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
- (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
+ (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
return 1;
if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1461,7 +1460,7 @@ int tcp_may_send_now(struct sock *sk)
* packet has never been sent out before (and thus is not cloned).
*/
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
- unsigned int mss_now)
+ unsigned int mss_now, gfp_t gfp)
{
struct sk_buff *buff;
int nlen = skb->len - len;
@@ -1471,7 +1470,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
if (skb->len != skb->data_len)
return tcp_fragment(sk, skb, len, mss_now);
- buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
+ buff = sk_stream_alloc_skb(sk, 0, gfp);
if (unlikely(buff == NULL))
return -ENOMEM;
@@ -1487,7 +1486,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
/* PSH and FIN should only be set in the second packet. */
flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+ TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
TCP_SKB_CB(buff)->flags = flags;
/* This packet was never sent out yet, so no SACK bits. */
@@ -1518,7 +1517,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight;
- if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+ if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
goto send_now;
if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1644,7 +1643,7 @@ static int tcp_mtu_probe(struct sock *sk)
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
- TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
+ TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
TCP_SKB_CB(nskb)->sacked = 0;
nskb->csum = 0;
nskb->ip_summed = skb->ip_summed;
@@ -1669,7 +1668,7 @@ static int tcp_mtu_probe(struct sock *sk)
sk_wmem_free_skb(sk, skb);
} else {
TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
- ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
+ ~(TCPHDR_FIN|TCPHDR_PSH);
if (!skb_shinfo(skb)->nr_frags) {
skb_pull(skb, copy);
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1769,7 +1768,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
cwnd_quota);
if (skb->len > limit &&
- unlikely(tso_fragment(sk, skb, limit, mss_now)))
+ unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2020,7 +2019,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
if (!sysctl_tcp_retrans_collapse)
return;
- if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
+ if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
return;
tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2112,7 +2111,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
* since it is cheap to do so and saves bytes on the network.
*/
if (skb->len > 0 &&
- (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+ (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
if (!pskb_trim(skb, 0)) {
/* Reuse, even though it does some unnecessary work */
@@ -2301,7 +2300,7 @@ void tcp_send_fin(struct sock *sk)
mss_now = tcp_current_mss(sk);
if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
TCP_SKB_CB(skb)->end_seq++;
tp->write_seq++;
} else {
@@ -2318,7 +2317,7 @@ void tcp_send_fin(struct sock *sk)
skb_reserve(skb, MAX_TCP_HEADER);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
- TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
+ TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
@@ -2343,7 +2342,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, MAX_TCP_HEADER);
tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
- TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
+ TCPHDR_ACK | TCPHDR_RST);
/* Send it off. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2363,11 +2362,11 @@ int tcp_send_synack(struct sock *sk)
struct sk_buff *skb;
skb = tcp_write_queue_head(sk);
- if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
+ if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
return -EFAULT;
}
- if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
+ if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
if (nskb == NULL)
@@ -2381,7 +2380,7 @@ int tcp_send_synack(struct sock *sk)
skb = nskb;
}
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
TCP_ECN_send_synack(tcp_sk(sk), skb);
}
TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2460,7 +2459,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
* not even correctly set)
*/
tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
- TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
+ TCPHDR_SYN | TCPHDR_ACK);
if (OPTION_COOKIE_EXTENSION & opts.options) {
if (s_data_desired) {
@@ -2592,7 +2591,7 @@ int tcp_connect(struct sock *sk)
skb_reserve(buff, MAX_TCP_HEADER);
tp->snd_nxt = tp->write_seq;
- tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
+ tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
TCP_ECN_send_syn(sk, buff);
/* Send it off. */
@@ -2698,7 +2697,7 @@ void tcp_send_ack(struct sock *sk)
/* Reserve space for headers and prepare control bits. */
skb_reserve(buff, MAX_TCP_HEADER);
- tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
+ tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
/* Send it off, this clears delayed acks for us. */
TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2732,7 +2731,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
* end to send an ack. Don't queue or clone SKB, just
* send it.
*/
- tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
+ tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
TCP_SKB_CB(skb)->when = tcp_time_stamp;
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
@@ -2762,13 +2761,13 @@ int tcp_write_wakeup(struct sock *sk)
if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
skb->len > mss) {
seg_size = min(seg_size, mss);
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
if (tcp_fragment(sk, skb, seg_size, mss))
return -1;
} else if (!tcp_skb_pcount(skb))
tcp_set_skb_tso_segs(sk, skb, mss);
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
if (!err)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b97bb1f..e81155d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -121,8 +121,6 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
static int __ipv6_regen_rndid(struct inet6_dev *idev);
static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
static void ipv6_regen_rndid(unsigned long data);
-
-static int desync_factor = MAX_DESYNC_FACTOR * HZ;
#endif
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
@@ -284,13 +282,16 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
static int snmp6_alloc_dev(struct inet6_dev *idev)
{
if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
- sizeof(struct ipstats_mib)) < 0)
+ sizeof(struct ipstats_mib),
+ __alignof__(struct ipstats_mib)) < 0)
goto err_ip;
if (snmp_mib_init((void __percpu **)idev->stats.icmpv6,
- sizeof(struct icmpv6_mib)) < 0)
+ sizeof(struct icmpv6_mib),
+ __alignof__(struct icmpv6_mib)) < 0)
goto err_icmp;
if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg,
- sizeof(struct icmpv6msg_mib)) < 0)
+ sizeof(struct icmpv6msg_mib),
+ __alignof__(struct icmpv6msg_mib)) < 0)
goto err_icmpmsg;
return 0;
@@ -890,7 +891,8 @@ retry:
idev->cnf.temp_valid_lft);
tmp_prefered_lft = min_t(__u32,
ifp->prefered_lft,
- idev->cnf.temp_prefered_lft - desync_factor / HZ);
+ idev->cnf.temp_prefered_lft -
+ idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
max_addresses = idev->cnf.max_addresses;
tmp_cstamp = ifp->cstamp;
@@ -1650,7 +1652,8 @@ static void ipv6_regen_rndid(unsigned long data)
expires = jiffies +
idev->cnf.temp_prefered_lft * HZ -
- idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - desync_factor;
+ idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
+ idev->cnf.max_desync_factor * HZ;
if (time_before(expires, jiffies)) {
printk(KERN_WARNING
"ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n",
@@ -3492,8 +3495,12 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
preferred -= tval;
else
preferred = 0;
- if (valid != INFINITY_LIFE_TIME)
- valid -= tval;
+ if (valid != INFINITY_LIFE_TIME) {
+ if (valid > tval)
+ valid -= tval;
+ else
+ valid = 0;
+ }
}
} else {
preferred = INFINITY_LIFE_TIME;
@@ -3855,12 +3862,28 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
memset(&stats[items], 0, pad);
}
+static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib,
+ int items, int bytes, size_t syncpoff)
+{
+ int i;
+ int pad = bytes - sizeof(u64) * items;
+ BUG_ON(pad < 0);
+
+ /* Use put_unaligned() because stats may not be aligned for u64. */
+ put_unaligned(items, &stats[0]);
+ for (i = 1; i < items; i++)
+ put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
+
+ memset(&stats[items], 0, pad);
+}
+
static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
int bytes)
{
switch (attrtype) {
case IFLA_INET6_STATS:
- __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
+ __snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6,
+ IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
break;
case IFLA_INET6_ICMP6STATS:
__snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
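
The snmp_mib_init() callers now pass __alignof__ so the per-CPU IPv6 MIBs can hold true 64-bit counters, and __snmp6_fill_stats64() sums them through snmp_fold_field64() with the offset of a per-CPU syncp, so 32-bit readers never observe a torn value; put_unaligned() is used because the netlink attribute buffer gives no u64 alignment guarantee. A rough stand-alone sketch of the folding step (names are illustrative, and memory barriers/READ_ONCE are elided for brevity):

/* One 64-bit counter per CPU, guarded by a sequence count that the
 * writer bumps to odd before and back to even after each update.
 */
struct pcpu_counter {
	unsigned int       seq;
	unsigned long long val;
};

static unsigned long long fold_counter(const struct pcpu_counter *percpu, int ncpus)
{
	unsigned long long sum = 0;
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		const struct pcpu_counter *c = &percpu[cpu];
		unsigned long long v;
		unsigned int start;

		do {				/* retry while an update is in flight */
			start = c->seq;
			v = c->val;
		} while ((start & 1) || c->seq != start);

		sum += v;			/* fold this CPU's contribution */
	}
	return sum;
}
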
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 94b1b9c..e830cd4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -971,19 +971,24 @@ static void ipv6_packet_cleanup(void)
static int __net_init ipv6_init_mibs(struct net *net)
{
if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
- sizeof (struct udp_mib)) < 0)
+ sizeof(struct udp_mib),
+ __alignof__(struct udp_mib)) < 0)
return -ENOMEM;
if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
- sizeof (struct udp_mib)) < 0)
+ sizeof(struct udp_mib),
+ __alignof__(struct udp_mib)) < 0)
goto err_udplite_mib;
if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
- sizeof(struct ipstats_mib)) < 0)
+ sizeof(struct ipstats_mib),
+ __alignof__(struct ipstats_mib)) < 0)
goto err_ip_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
- sizeof(struct icmpv6_mib)) < 0)
+ sizeof(struct icmpv6_mib),
+ __alignof__(struct icmpv6_mib)) < 0)
goto err_icmp_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
- sizeof(struct icmpv6msg_mib)) < 0)
+ sizeof(struct icmpv6msg_mib),
+ __alignof__(struct icmpv6msg_mib)) < 0)
goto err_icmpmsg_mib;
return 0;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index bd43f01..a7f66bc 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -55,8 +55,6 @@
#include <asm/uaccess.h>
-DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
-
struct ip6_ra_chain *ip6_ra_chain;
DEFINE_RWLOCK(ip6_ra_lock);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 566798d..d082eae 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -174,17 +174,28 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib,
const struct snmp_mib *itemlist)
{
int i;
- for (i=0; itemlist[i].name; i++)
+
+ for (i = 0; itemlist[i].name; i++)
seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
snmp_fold_field(mib, itemlist[i].entry));
}
+static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib,
+ const struct snmp_mib *itemlist, size_t syncpoff)
+{
+ int i;
+
+ for (i = 0; itemlist[i].name; i++)
+ seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name,
+ snmp_fold_field64(mib, itemlist[i].entry, syncpoff));
+}
+
static int snmp6_seq_show(struct seq_file *seq, void *v)
{
struct net *net = (struct net *)seq->private;
- snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics,
- snmp6_ipstats_list);
+ snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics,
+ snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
snmp6_icmp6_list);
snmp6_seq_show_icmpv6msg(seq,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 6d4292f..545c414 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -150,11 +150,8 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
EXPORT_SYMBOL(ip6_frag_match);
/* Memory Tracking Functions. */
-static inline void frag_kfree_skb(struct netns_frags *nf,
- struct sk_buff *skb, int *work)
+static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
- if (work)
- *work -= skb->truesize;
atomic_sub(skb->truesize, &nf->mem);
kfree_skb(skb);
}
@@ -336,6 +333,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
* in the chain of fragments so far. We must know where to put
* this fragment, right?
*/
+ prev = fq->q.fragments_tail;
+ if (!prev || FRAG6_CB(prev)->offset < offset) {
+ next = NULL;
+ goto found;
+ }
prev = NULL;
for(next = fq->q.fragments; next != NULL; next = next->next) {
if (FRAG6_CB(next)->offset >= offset)
@@ -343,6 +345,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
prev = next;
}
+found:
/* We found where to put this one. Check for overlap with
* preceding fragment, and, if needed, align things so that
* any overlaps are eliminated.
@@ -392,7 +395,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
fq->q.fragments = next;
fq->q.meat -= free_it->len;
- frag_kfree_skb(fq->q.net, free_it, NULL);
+ frag_kfree_skb(fq->q.net, free_it);
}
}
@@ -400,6 +403,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
/* Insert this fragment in the chain of fragments. */
skb->next = next;
+ if (!next)
+ fq->q.fragments_tail = skb;
if (prev)
prev->next = skb;
else
@@ -466,6 +471,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
goto out_oom;
fp->next = head->next;
+ if (!fp->next)
+ fq->q.fragments_tail = fp;
prev->next = fp;
skb_morph(head, fq->q.fragments);
@@ -524,7 +531,6 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
skb_shinfo(head)->frag_list = head->next;
skb_reset_transport_header(head);
skb_push(head, head->data - skb_network_header(head));
- atomic_sub(head->truesize, &fq->q.net->mem);
for (fp=head->next; fp; fp = fp->next) {
head->data_len += fp->len;
@@ -534,8 +540,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
- atomic_sub(fp->truesize, &fq->q.net->mem);
}
+ atomic_sub(head->truesize, &fq->q.net->mem);
head->next = NULL;
head->dev = dev;
@@ -553,6 +559,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
rcu_read_unlock();
fq->q.fragments = NULL;
+ fq->q.fragments_tail = NULL;
return 1;
out_oversize:
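
The reassembly hunks add a fragments_tail pointer next to the singly linked fragment chain: fragments usually arrive in order, so the new fast path appends after the current tail in O(1) instead of walking the whole list to find the insertion point, and the tail is kept current on insert and cleared after reassembly. The same idea in a small self-contained sketch (illustrative names):

struct frag {
	struct frag *next;
	unsigned int offset;
};

struct frag_queue {
	struct frag *head;
	struct frag *tail;	/* last fragment queued so far */
};

static void frag_insert(struct frag_queue *q, struct frag *f)
{
	struct frag *prev, *next;

	/* Fast path: in-order arrival goes straight after the tail. */
	if (!q->tail || q->tail->offset < f->offset) {
		f->next = NULL;
		if (q->tail)
			q->tail->next = f;
		else
			q->head = f;
		q->tail = f;
		return;
	}

	/* Slow path: walk from the head to find the right spot. */
	prev = NULL;
	for (next = q->head; next; next = next->next) {
		if (next->offset >= f->offset)
			break;
		prev = next;
	}

	f->next = next;
	if (prev)
		prev->next = f;
	else
		q->head = f;
	if (!next)
		q->tail = f;
}
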
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 70d330f..09fd34f 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -164,6 +164,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
int mss;
struct dst_entry *dst;
__u8 rcv_wscale;
+ bool ecn_ok;
if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
@@ -180,8 +181,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
+ if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
+ goto out;
ret = NULL;
req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
@@ -215,9 +216,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->expires = 0UL;
req->retrans = 0;
- ireq->ecn_ok = 0;
+ ireq->ecn_ok = ecn_ok;
ireq->snd_wscale = tcp_opt.snd_wscale;
- ireq->rcv_wscale = tcp_opt.rcv_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5887141..5ebc27e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1269,13 +1269,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
treq = inet6_rsk(req);
ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
- if (!want_cookie)
+ if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, tcp_hdr(skb));
- if (want_cookie) {
- isn = cookie_v6_init_sequence(sk, skb, &req->mss);
- req->cookie_ts = tmp_opt.tstamp_ok;
- } else if (!isn) {
+ if (!isn) {
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1288,8 +1285,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!sk->sk_bound_dev_if &&
ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
treq->iif = inet6_iif(skb);
-
- isn = tcp_v6_init_sequence(skb);
+ if (!want_cookie) {
+ isn = tcp_v6_init_sequence(skb);
+ } else {
+ isn = cookie_v6_init_sequence(sk, skb, &req->mss);
+ req->cookie_ts = tmp_opt.tstamp_ok;
+ }
}
tcp_rsk(req)->snt_isn = isn;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 83eec7a..4d6f865 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -69,6 +69,7 @@ endchoice
config MAC80211_RC_DEFAULT
string
+ default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL && MAC80211_RC_MINSTREL_HT
default "minstrel" if MAC80211_RC_DEFAULT_MINSTREL
default "pid" if MAC80211_RC_DEFAULT_PID
default ""
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 6bb9a9a..965b272 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -6,39 +6,70 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2008, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+/**
+ * DOC: RX A-MPDU aggregation
+ *
+ * Aggregation on the RX side requires only implementing the
+ * @ampdu_action callback that is invoked to start/stop any
+ * block-ack sessions for RX aggregation.
+ *
+ * When RX aggregation is started by the peer, the driver is
+ * notified via the @ampdu_action callback with the
+ * %IEEE80211_AMPDU_RX_START action and may reject the request,
+ * in which case a negative response is sent to the peer; if it
+ * accepts, a positive response is sent.
+ *
+ * While the session is active, the device/driver are required
+ * to de-aggregate frames and pass them up one by one to mac80211,
+ * which will handle the reorder buffer.
+ *
+ * When the aggregation session is stopped again by the peer or
+ * ourselves, the driver's @ampdu_action function will be called
+ * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the
+ * call must not fail.
+ */
+
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
-static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
- u16 initiator, u16 reason,
- bool from_timer)
+static void ieee80211_free_tid_rx(struct rcu_head *h)
{
- struct ieee80211_local *local = sta->local;
- struct tid_ampdu_rx *tid_rx;
+ struct tid_ampdu_rx *tid_rx =
+ container_of(h, struct tid_ampdu_rx, rcu_head);
int i;
- spin_lock_bh(&sta->lock);
+ for (i = 0; i < tid_rx->buf_size; i++)
+ dev_kfree_skb(tid_rx->reorder_buf[i]);
+ kfree(tid_rx->reorder_buf);
+ kfree(tid_rx->reorder_time);
+ kfree(tid_rx);
+}
- /* check if TID is in operational state */
- if (!sta->ampdu_mlme.tid_active_rx[tid]) {
- spin_unlock_bh(&sta->lock);
- return;
- }
+void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason)
+{
+ struct ieee80211_local *local = sta->local;
+ struct tid_ampdu_rx *tid_rx;
- sta->ampdu_mlme.tid_active_rx[tid] = false;
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
tid_rx = sta->ampdu_mlme.tid_rx[tid];
+ if (!tid_rx)
+ return;
+
+ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);
+
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
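
The DOC block added above spells out the driver contract for RX aggregation: a single @ampdu_action callback that may refuse %IEEE80211_AMPDU_RX_START (which makes mac80211 send a negative ADDBA response) and must not fail %IEEE80211_AMPDU_RX_STOP. A bare-bones handler might look roughly like this — the foo_* hardware hooks are hypothetical, only the callback shape matches this series:

static int foo_ampdu_action(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    enum ieee80211_ampdu_mlme_action action,
			    struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct foo_priv *priv = hw->priv;	/* hypothetical driver state */

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		/* A non-zero return here becomes a negative ADDBA response. */
		return foo_hw_rx_agg_start(priv, sta->addr, tid, *ssn);
	case IEEE80211_AMPDU_RX_STOP:
		/* Must succeed; mac80211 tears the session down regardless. */
		foo_hw_rx_agg_stop(priv, sta->addr, tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
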
@@ -54,32 +85,17 @@ static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
ieee80211_send_delba(sta->sdata, sta->sta.addr,
tid, 0, reason);
- /* free the reordering buffer */
- for (i = 0; i < tid_rx->buf_size; i++) {
- if (tid_rx->reorder_buf[i]) {
- /* release the reordered frames */
- dev_kfree_skb(tid_rx->reorder_buf[i]);
- tid_rx->stored_mpdu_num--;
- tid_rx->reorder_buf[i] = NULL;
- }
- }
-
- /* free resources */
- kfree(tid_rx->reorder_buf);
- kfree(tid_rx->reorder_time);
- sta->ampdu_mlme.tid_rx[tid] = NULL;
-
- spin_unlock_bh(&sta->lock);
+ del_timer_sync(&tid_rx->session_timer);
- if (!from_timer)
- del_timer_sync(&tid_rx->session_timer);
- kfree(tid_rx);
+ call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason)
{
- ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false);
+ mutex_lock(&sta->ampdu_mlme.mtx);
+ ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
}
/*
@@ -100,8 +116,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
#endif
- ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT,
- WLAN_REASON_QSTA_TIMEOUT, true);
+ set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
+ ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}
static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
@@ -212,9 +228,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* examine state machine */
- spin_lock_bh(&sta->lock);
+ mutex_lock(&sta->ampdu_mlme.mtx);
- if (sta->ampdu_mlme.tid_active_rx[tid]) {
+ if (sta->ampdu_mlme.tid_rx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -225,9 +241,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
}
/* prepare A-MPDU MLME for Rx aggregation */
- sta->ampdu_mlme.tid_rx[tid] =
- kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
- if (!sta->ampdu_mlme.tid_rx[tid]) {
+ tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
+ if (!tid_agg_rx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
@@ -235,14 +250,11 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
#endif
goto end;
}
- /* rx timer */
- sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
- sta_rx_agg_session_timer_expired;
- sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
- (unsigned long)&sta->timer_to_tid[tid];
- init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
- tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+ /* rx timer */
+ tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
+ tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+ init_timer(&tid_agg_rx->session_timer);
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
@@ -257,8 +269,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
#endif
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
- kfree(sta->ampdu_mlme.tid_rx[tid]);
- sta->ampdu_mlme.tid_rx[tid] = NULL;
+ kfree(tid_agg_rx);
goto end;
}
@@ -270,13 +281,12 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
if (ret) {
kfree(tid_agg_rx->reorder_buf);
+ kfree(tid_agg_rx->reorder_time);
kfree(tid_agg_rx);
- sta->ampdu_mlme.tid_rx[tid] = NULL;
goto end;
}
- /* change state and send addba resp */
- sta->ampdu_mlme.tid_active_rx[tid] = true;
+ /* update data */
tid_agg_rx->dialog_token = dialog_token;
tid_agg_rx->ssn = start_seq_num;
tid_agg_rx->head_seq_num = start_seq_num;
@@ -284,8 +294,15 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
tid_agg_rx->timeout = timeout;
tid_agg_rx->stored_mpdu_num = 0;
status = WLAN_STATUS_SUCCESS;
+
+ /* activate it for RX */
+ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
+
+ if (timeout)
+ mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
+
end:
- spin_unlock_bh(&sta->lock);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
end_no_lock:
ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index d1b6664..c893f23 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -6,7 +6,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2009, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -21,28 +21,39 @@
#include "wme.h"
/**
- * DOC: TX aggregation
+ * DOC: TX A-MPDU aggregation
*
* Aggregation on the TX side requires setting the hardware flag
- * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues
- * hardware parameter to the number of hardware AMPDU queues. If there are no
- * hardware queues then the driver will (currently) have to do all frame
- * buffering.
+ * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
+ * packets with a flag indicating A-MPDU aggregation. The driver
+ * or device is responsible for actually aggregating the frames,
+ * as well as deciding how many and which to aggregate.
*
- * When TX aggregation is started by some subsystem (usually the rate control
- * algorithm would be appropriate) by calling the
- * ieee80211_start_tx_ba_session() function, the driver will be notified via
- * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action.
+ * When TX aggregation is started by some subsystem (usually the rate
+ * control algorithm would be appropriate) by calling the
+ * ieee80211_start_tx_ba_session() function, the driver will be
+ * notified via its @ampdu_action function, with the
+ * %IEEE80211_AMPDU_TX_START action.
*
* In response to that, the driver is later required to call the
- * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe())
- * function, which will start the aggregation session.
+ * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
+ * start the aggregation session after the peer has also responded.
+ * If the peer responds negatively, the session will be stopped
+ * again right away. Note that it is possible for the aggregation
+ * session to be stopped before the driver has indicated that it
+ * is done setting it up, in which case it must not indicate the
+ * setup completion.
*
- * Similarly, when the aggregation session is stopped by
- * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will
- * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the
- * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb()
- * (or ieee80211_stop_tx_ba_cb_irqsafe()).
+ * Also note that, since we also need to wait for a response from
+ * the peer, the driver is notified of the completion of the
+ * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
+ * @ampdu_action callback.
+ *
+ * Similarly, when the aggregation session is stopped by the peer
+ * or something calling ieee80211_stop_tx_ba_session(), the driver's
+ * @ampdu_action function will be called with the action
+ * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
+ * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
*/
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
@@ -125,25 +136,53 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
ieee80211_tx_skb(sdata, skb);
}
+static void kfree_tid_tx(struct rcu_head *rcu_head)
+{
+ struct tid_ampdu_tx *tid_tx =
+ container_of(rcu_head, struct tid_ampdu_tx, rcu_head);
+
+ kfree(tid_tx);
+}
+
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator)
{
struct ieee80211_local *local = sta->local;
+ struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
int ret;
- u8 *state;
+
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
+ if (!tid_tx)
+ return -ENOENT;
+
+ spin_lock_bh(&sta->lock);
+
+ if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+ /* not even started yet! */
+ rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
+ spin_unlock_bh(&sta->lock);
+ call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+ return 0;
+ }
+
+ spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- state = &sta->ampdu_mlme.tid_state_tx[tid];
+ set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
- if (*state == HT_AGG_STATE_OPERATIONAL)
- sta->ampdu_mlme.addba_req_num[tid] = 0;
+ /*
+ * After this packets are no longer handed right through
+ * to the driver but are put onto tid_tx->pending instead,
+ * with locking to ensure proper access.
+ */
+ clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
- *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
- (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
+ tid_tx->stop_initiator = initiator;
ret = drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_STOP,
@@ -174,16 +213,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
u16 tid = *(u8 *)data;
struct sta_info *sta = container_of((void *)data,
struct sta_info, timer_to_tid[tid]);
- u8 *state;
-
- state = &sta->ampdu_mlme.tid_state_tx[tid];
+ struct tid_ampdu_tx *tid_tx;
/* check if the TID waits for addBA response */
- spin_lock_bh(&sta->lock);
- if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
- HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
- HT_ADDBA_REQUESTED_MSK) {
- spin_unlock_bh(&sta->lock);
+ rcu_read_lock();
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx ||
+ test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
+ rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "timer expired on tid %d but we are not "
"(or no longer) expecting addBA response there\n",
@@ -196,8 +233,8 @@ static void sta_addba_resp_timer_expired(unsigned long data)
printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
#endif
- ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
- spin_unlock_bh(&sta->lock);
+ ieee80211_stop_tx_ba_session(&sta->sta, tid);
+ rcu_read_unlock();
}
static inline int ieee80211_ac_from_tid(int tid)
@@ -205,14 +242,112 @@ static inline int ieee80211_ac_from_tid(int tid)
return ieee802_1d_to_ac[tid & 7];
}
+/*
+ * When multiple aggregation sessions on multiple stations
+ * are being created/destroyed simultaneously, we need to
+ * refcount the global queue stop caused by that in order
+ * to not get into a situation where one of the aggregation
+ * setup or teardown re-enables queues before the other is
+ * ready to handle that.
+ *
+ * These two functions take care of this issue by keeping
+ * a global "agg_queue_stop" refcount.
+ */
+static void __acquires(agg_queue)
+ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
+{
+ int queue = ieee80211_ac_from_tid(tid);
+
+ if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
+ ieee80211_stop_queue_by_reason(
+ &local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ __acquire(agg_queue);
+}
+
+static void __releases(agg_queue)
+ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
+{
+ int queue = ieee80211_ac_from_tid(tid);
+
+ if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
+ ieee80211_wake_queue_by_reason(
+ &local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ __release(agg_queue);
+}
+
+void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+{
+ struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u16 start_seq_num;
+ int ret;
+
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
+ /*
+ * While we're asking the driver about the aggregation,
+ * stop the AC queue so that we don't have to worry
+ * about frames that came in while we were doing that,
+ * which would require us to put them to the AC pending
+ * afterwards which just makes the code more complex.
+ */
+ ieee80211_stop_queue_agg(local, tid);
+
+ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+
+ /*
+ * make sure no packets are being processed to get
+ * valid starting sequence number
+ */
+ synchronize_net();
+
+ start_seq_num = sta->tid_seq[tid] >> 4;
+
+ ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
+ &sta->sta, tid, &start_seq_num);
+ if (ret) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "BA request denied - HW unavailable for"
+ " tid %d\n", tid);
+#endif
+ spin_lock_bh(&sta->lock);
+ rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
+ spin_unlock_bh(&sta->lock);
+
+ ieee80211_wake_queue_agg(local, tid);
+ call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+ return;
+ }
+
+ /* we can take packets again now */
+ ieee80211_wake_queue_agg(local, tid);
+
+ /* activate the timer for the recipient's addBA response */
+ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
+#endif
+
+ spin_lock_bh(&sta->lock);
+ sta->ampdu_mlme.addba_req_num[tid]++;
+ spin_unlock_bh(&sta->lock);
+
+ /* send AddBA request */
+ ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
+ tid_tx->dialog_token, start_seq_num,
+ 0x40, 5000);
+}
+
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- u8 *state;
+ struct tid_ampdu_tx *tid_tx;
int ret = 0;
- u16 start_seq_num;
trace_api_start_tx_ba_session(pubsta, tid);
@@ -239,24 +374,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
sdata->vif.type != NL80211_IFTYPE_AP)
return -EINVAL;
- if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Disassociation is in progress. "
- "Denying BA session request\n");
-#endif
- return -EINVAL;
- }
-
if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Suspend in progress. "
+ printk(KERN_DEBUG "BA sessions blocked. "
"Denying BA session request\n");
#endif
return -EINVAL;
}
spin_lock_bh(&sta->lock);
- spin_lock(&local->ampdu_lock);
/* we have tried too many times, receiver does not want A-MPDU */
if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
@@ -264,9 +390,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
goto err_unlock_sta;
}
- state = &sta->ampdu_mlme.tid_state_tx[tid];
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
/* check if the TID is not in aggregation flow already */
- if (*state != HT_AGG_STATE_IDLE) {
+ if (tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA request denied - session is not "
"idle on tid %u\n", tid);
@@ -275,96 +401,37 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
goto err_unlock_sta;
}
- /*
- * While we're asking the driver about the aggregation,
- * stop the AC queue so that we don't have to worry
- * about frames that came in while we were doing that,
- * which would require us to put them to the AC pending
- * afterwards which just makes the code more complex.
- */
- ieee80211_stop_queue_by_reason(
- &local->hw, ieee80211_ac_from_tid(tid),
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-
/* prepare A-MPDU MLME for Tx aggregation */
- sta->ampdu_mlme.tid_tx[tid] =
- kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
- if (!sta->ampdu_mlme.tid_tx[tid]) {
+ tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
+ if (!tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
tid);
#endif
ret = -ENOMEM;
- goto err_wake_queue;
+ goto err_unlock_sta;
}
- skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending);
+ skb_queue_head_init(&tid_tx->pending);
+ __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
/* Tx timer */
- sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
- sta_addba_resp_timer_expired;
- sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
- (unsigned long)&sta->timer_to_tid[tid];
- init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-
- /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
- * call back right away, it must see that the flow has begun */
- *state |= HT_ADDBA_REQUESTED_MSK;
-
- start_seq_num = sta->tid_seq[tid] >> 4;
-
- ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
- pubsta, tid, &start_seq_num);
+ tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
+ tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+ init_timer(&tid_tx->addba_resp_timer);
- if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - HW unavailable for"
- " tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
- *state = HT_AGG_STATE_IDLE;
- goto err_free;
- }
-
- /* Driver vetoed or OKed, but we can take packets again now */
- ieee80211_wake_queue_by_reason(
- &local->hw, ieee80211_ac_from_tid(tid),
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-
- spin_unlock(&local->ampdu_lock);
-
- /* prepare tid data */
+ /* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
- sta->ampdu_mlme.tid_tx[tid]->dialog_token =
- sta->ampdu_mlme.dialog_token_allocator;
- sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
+ tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
- spin_unlock_bh(&sta->lock);
+ /* finally, assign it to the array */
+ rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
- /* send AddBA request */
- ieee80211_send_addba_request(sdata, pubsta->addr, tid,
- sta->ampdu_mlme.tid_tx[tid]->dialog_token,
- sta->ampdu_mlme.tid_tx[tid]->ssn,
- 0x40, 5000);
- sta->ampdu_mlme.addba_req_num[tid]++;
- /* activate the timer for the recipient's addBA response */
- sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
- jiffies + ADDBA_RESP_INTERVAL;
- add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
- return 0;
-
- err_free:
- kfree(sta->ampdu_mlme.tid_tx[tid]);
- sta->ampdu_mlme.tid_tx[tid] = NULL;
- err_wake_queue:
- ieee80211_wake_queue_by_reason(
- &local->hw, ieee80211_ac_from_tid(tid),
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ /* this flow continues off the work */
err_unlock_sta:
- spin_unlock(&local->ampdu_lock);
spin_unlock_bh(&sta->lock);
return ret;
}
@@ -372,69 +439,65 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
/*
* splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish and holding
- * local->ampdu_lock across both calls.
+ * requires a call to ieee80211_agg_splice_finish later
*/
-static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
- struct sta_info *sta, u16 tid)
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ struct tid_ampdu_tx *tid_tx, u16 tid)
{
+ int queue = ieee80211_ac_from_tid(tid);
unsigned long flags;
- u16 queue = ieee80211_ac_from_tid(tid);
-
- ieee80211_stop_queue_by_reason(
- &local->hw, queue,
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
- return;
+ ieee80211_stop_queue_agg(local, tid);
- if (WARN(!sta->ampdu_mlme.tid_tx[tid],
- "TID %d gone but expected when splicing aggregates from"
- "the pending queue\n", tid))
+ if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+ " from the pending queue\n", tid))
return;
- if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
+ if (!skb_queue_empty(&tid_tx->pending)) {
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
/* copy over remaining packets */
- skb_queue_splice_tail_init(
- &sta->ampdu_mlme.tid_tx[tid]->pending,
- &local->pending[queue]);
+ skb_queue_splice_tail_init(&tid_tx->pending,
+ &local->pending[queue]);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
}
-static void ieee80211_agg_splice_finish(struct ieee80211_local *local,
- struct sta_info *sta, u16 tid)
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
{
- u16 queue = ieee80211_ac_from_tid(tid);
-
- ieee80211_wake_queue_by_reason(
- &local->hw, queue,
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ ieee80211_wake_queue_agg(local, tid);
}
-/* caller must hold sta->lock */
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif
- spin_lock(&local->ampdu_lock);
- ieee80211_agg_splice_packets(local, sta, tid);
- /*
- * NB: we rely on sta->lock being taken in the TX
- * processing here when adding to the pending queue,
- * otherwise we could only change the state of the
- * session to OPERATIONAL _here_.
- */
- ieee80211_agg_splice_finish(local, sta, tid);
- spin_unlock(&local->ampdu_lock);
-
drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_OPERATIONAL,
&sta->sta, tid, NULL);
+
+ /*
+ * synchronize with TX path, while splicing the TX path
+ * should block so it won't put more packets onto pending.
+ */
+ spin_lock_bh(&sta->lock);
+
+ ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
+ /*
+ * Now mark as operational. This will be visible
+ * in the TX path, and lets it go lock-free in
+ * the common case.
+ */
+ set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
+ ieee80211_agg_splice_finish(local, tid);
+
+ spin_unlock_bh(&sta->lock);
}
void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
@@ -442,7 +505,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- u8 *state;
+ struct tid_ampdu_tx *tid_tx;
trace_api_start_tx_ba_cb(sdata, ra, tid);
@@ -454,42 +517,36 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
return;
}
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, ra);
if (!sta) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
return;
}
- state = &sta->ampdu_mlme.tid_state_tx[tid];
- spin_lock_bh(&sta->lock);
+ mutex_lock(&sta->ampdu_mlme.mtx);
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
- if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) {
+ if (WARN_ON(!tid_tx)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
- *state);
+ printk(KERN_DEBUG "addBA was not requested!\n");
#endif
- spin_unlock_bh(&sta->lock);
- rcu_read_unlock();
- return;
+ goto unlock;
}
- if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK))
- goto out;
-
- *state |= HT_ADDBA_DRV_READY_MSK;
+ if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+ goto unlock;
- if (*state == HT_AGG_STATE_OPERATIONAL)
+ if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
- out:
- spin_unlock_bh(&sta->lock);
- rcu_read_unlock();
+ unlock:
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+ mutex_unlock(&local->sta_mtx);
}
-EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
@@ -510,33 +567,24 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
- ra_tid->vif = vif;
- skb->pkt_type = IEEE80211_ADDBA_MSG;
- skb_queue_tail(&local->skb_queue, skb);
- tasklet_schedule(&local->tasklet);
+ skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
+ skb_queue_tail(&sdata->skb_queue, skb);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator)
{
- u8 *state;
int ret;
- /* check if the TID is in aggregation */
- state = &sta->ampdu_mlme.tid_state_tx[tid];
- spin_lock_bh(&sta->lock);
-
- if (*state != HT_AGG_STATE_OPERATIONAL) {
- ret = -ENOENT;
- goto unlock;
- }
+ mutex_lock(&sta->ampdu_mlme.mtx);
ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
- unlock:
- spin_unlock_bh(&sta->lock);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+
return ret;
}
@@ -545,6 +593,8 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
+ struct tid_ampdu_tx *tid_tx;
+ int ret = 0;
trace_api_stop_tx_ba_session(pubsta, tid);
@@ -554,7 +604,26 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
if (tid >= STA_TID_NUM)
return -EINVAL;
- return __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
+ spin_lock_bh(&sta->lock);
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
+
+ if (!tid_tx) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ /* already in progress stopping it */
+ ret = 0;
+ goto unlock;
+ }
+
+ set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ unlock:
+ spin_unlock_bh(&sta->lock);
+ return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
@@ -563,7 +632,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- u8 *state;
+ struct tid_ampdu_tx *tid_tx;
trace_api_stop_tx_ba_cb(sdata, ra, tid);
@@ -580,51 +649,56 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
+
sta = sta_info_get(sdata, ra);
if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
- rcu_read_unlock();
- return;
+ goto unlock;
}
- state = &sta->ampdu_mlme.tid_state_tx[tid];
- /* NOTE: no need to use sta->lock in this state check, as
- * ieee80211_stop_tx_ba_session will let only one stop call to
- * pass through per sta/tid
- */
- if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
+ mutex_lock(&sta->ampdu_mlme.mtx);
+ spin_lock_bh(&sta->lock);
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
+
+ if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
- rcu_read_unlock();
- return;
+ goto unlock_sta;
}
- if (*state & HT_AGG_STATE_INITIATOR_MSK)
+ if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR)
ieee80211_send_delba(sta->sdata, ra, tid,
WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
- spin_lock_bh(&sta->lock);
- spin_lock(&local->ampdu_lock);
+ /*
+ * When we get here, the TX path will not be lockless any more wrt.
+ * aggregation, since the OPERATIONAL bit has long been cleared.
+ * Thus it will block on getting the lock, if it occurs. So if we
+ * stop the queue now, we will not get any more packets, and any
+ * that might be being processed will wait for us here, thereby
+ * guaranteeing that no packets go to the tid_tx pending queue any
+ * more.
+ */
- ieee80211_agg_splice_packets(local, sta, tid);
+ ieee80211_agg_splice_packets(local, tid_tx, tid);
- *state = HT_AGG_STATE_IDLE;
- /* from now on packets are no longer put onto sta->pending */
- kfree(sta->ampdu_mlme.tid_tx[tid]);
- sta->ampdu_mlme.tid_tx[tid] = NULL;
+ /* future packets must not find the tid_tx struct any more */
+ rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
- ieee80211_agg_splice_finish(local, sta, tid);
+ ieee80211_agg_splice_finish(local, tid);
- spin_unlock(&local->ampdu_lock);
- spin_unlock_bh(&sta->lock);
+ call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
- rcu_read_unlock();
+ unlock_sta:
+ spin_unlock_bh(&sta->lock);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+ unlock:
+ mutex_unlock(&local->sta_mtx);
}
-EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
@@ -645,11 +719,10 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
- ra_tid->vif = vif;
- skb->pkt_type = IEEE80211_DELBA_MSG;
- skb_queue_tail(&local->skb_queue, skb);
- tasklet_schedule(&local->tasklet);
+ skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
+ skb_queue_tail(&sdata->skb_queue, skb);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
@@ -659,40 +732,40 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
struct ieee80211_mgmt *mgmt,
size_t len)
{
+ struct tid_ampdu_tx *tid_tx;
u16 capab, tid;
- u8 *state;
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
- state = &sta->ampdu_mlme.tid_state_tx[tid];
-
- spin_lock_bh(&sta->lock);
+ mutex_lock(&sta->ampdu_mlme.mtx);
- if (!(*state & HT_ADDBA_REQUESTED_MSK))
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
+ if (!tid_tx)
goto out;
- if (mgmt->u.action.u.addba_resp.dialog_token !=
- sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
+ if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+#endif
goto out;
}
- del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+ del_timer(&tid_tx->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+#endif
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
== WLAN_STATUS_SUCCESS) {
- u8 curstate = *state;
-
- *state |= HT_ADDBA_RECEIVED_MSK;
+ if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
+ &tid_tx->state)) {
+ /* ignore duplicate response */
+ goto out;
+ }
- if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL)
+ if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
sta->ampdu_mlme.addba_req_num[tid] = 0;
@@ -701,5 +774,5 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
}
out:
- spin_unlock_bh(&sta->lock);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
}
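
Beyond the handshake rework, the pattern running through the agg-rx.c and agg-tx.c changes above is lifetime management by RCU: per-TID state is published with rcu_assign_pointer(), the TX/RX hot paths look it up locklessly under rcu_read_lock(), and teardown clears the pointer and frees the struct through call_rcu() so in-flight readers finish before the memory goes away. Stripped of the mac80211 specifics, a generic sketch of the same pattern:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct tid_state {
	struct rcu_head rcu_head;
	/* ... per-TID data ... */
};

static void tid_state_free(struct rcu_head *head)
{
	kfree(container_of(head, struct tid_state, rcu_head));
}

/* Update side (serialized by a mutex or spinlock owned by the caller). */
static void tid_state_publish(struct tid_state **slot, struct tid_state *new)
{
	rcu_assign_pointer(*slot, new);		/* readers may see it from now on */
}

static void tid_state_retire(struct tid_state **slot)
{
	struct tid_state *old = *slot;

	rcu_assign_pointer(*slot, NULL);	/* new readers find nothing */
	if (old)
		call_rcu(&old->rcu_head, tid_state_free);	/* freed after a grace period */
}

/* Read side (e.g. the per-packet fast path). */
static bool tid_state_active(struct tid_state **slot)
{
	struct tid_state *t;
	bool active;

	rcu_read_lock();
	t = rcu_dereference(*slot);		/* lock-free lookup */
	active = (t != NULL);
	rcu_read_unlock();

	return active;
}
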
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 1f76d04..9eb02a3 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -413,9 +413,6 @@ static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- if (!local->ops->get_survey)
- return -EOPNOTSUPP;
-
return drv_get_survey(local, idx, survey);
}
@@ -1329,28 +1326,28 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
}
static int ieee80211_set_tx_power(struct wiphy *wiphy,
- enum tx_power_setting type, int dbm)
+ enum nl80211_tx_power_setting type, int mbm)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_channel *chan = local->hw.conf.channel;
u32 changes = 0;
switch (type) {
- case TX_POWER_AUTOMATIC:
+ case NL80211_TX_POWER_AUTOMATIC:
local->user_power_level = -1;
break;
- case TX_POWER_LIMITED:
- if (dbm < 0)
- return -EINVAL;
- local->user_power_level = dbm;
+ case NL80211_TX_POWER_LIMITED:
+ if (mbm < 0 || (mbm % 100))
+ return -EOPNOTSUPP;
+ local->user_power_level = MBM_TO_DBM(mbm);
break;
- case TX_POWER_FIXED:
- if (dbm < 0)
- return -EINVAL;
+ case NL80211_TX_POWER_FIXED:
+ if (mbm < 0 || (mbm % 100))
+ return -EOPNOTSUPP;
/* TODO: move to cfg80211 when it knows the channel */
- if (dbm > chan->max_power)
+ if (MBM_TO_DBM(mbm) > chan->max_power)
return -EINVAL;
- local->user_power_level = dbm;
+ local->user_power_level = MBM_TO_DBM(mbm);
break;
}
@@ -1446,7 +1443,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- struct ieee80211_conf *conf = &local->hw.conf;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
@@ -1455,11 +1451,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
if (enabled == sdata->u.mgd.powersave &&
- timeout == conf->dynamic_ps_forced_timeout)
+ timeout == local->dynamic_ps_forced_timeout)
return 0;
sdata->u.mgd.powersave = enabled;
- conf->dynamic_ps_forced_timeout = timeout;
+ local->dynamic_ps_forced_timeout = timeout;
/* no change, but if automatic follow powersave */
mutex_lock(&sdata->u.mgd.mtx);
@@ -1555,9 +1551,55 @@ static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
bool channel_type_valid,
const u8 *buf, size_t len, u64 *cookie)
{
- return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan,
- channel_type, channel_type_valid,
- buf, len, cookie);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct sta_info *sta;
+ const struct ieee80211_mgmt *mgmt = (void *)buf;
+ u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ /* Check that we are on the requested channel for transmission */
+ if (chan != local->tmp_channel &&
+ chan != local->oper_channel)
+ return -EBUSY;
+ if (channel_type_valid &&
+ (channel_type != local->tmp_channel_type &&
+ channel_type != local->_oper_channel_type))
+ return -EBUSY;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_ADHOC:
+ if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
+ break;
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->da);
+ rcu_read_unlock();
+ if (!sta)
+ return -ENOLINK;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
+ flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ memcpy(skb_put(skb, len), buf, len);
+
+ IEEE80211_SKB_CB(skb)->flags = flags;
+
+ skb->dev = sdata->dev;
+ ieee80211_tx_skb(sdata, skb);
+
+ *cookie = (unsigned long) skb;
+ return 0;
}
struct cfg80211_ops mac80211_config_ops = {
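[editor's note] The mBm handling in the new set_tx_power callback above is easy to misread; a minimal sketch of the same check, assuming MBM_TO_DBM() is the integer division by 100 from cfg80211.h (example_power_to_dbm() is an illustrative helper, not part of the patch):

/* nl80211 passes power in mBm (1/100 dBm); only whole-dBm values pass */
static int example_power_to_dbm(int mbm, int chan_max_dbm)
{
	if (mbm < 0 || (mbm % 100))
		return -EOPNOTSUPP;	/* not a whole dBm value */
	if (MBM_TO_DBM(mbm) > chan_max_dbm)
		return -EINVAL;		/* above the channel limit */
	return MBM_TO_DBM(mbm);		/* dBm value to store */
}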
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 97c9e46..fa5e76e 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -143,7 +143,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
len = p - buf;
break;
case ALG_CCMP:
- for (i = 0; i < NUM_RX_DATA_QUEUES; i++) {
+ for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) {
rpn = key->u.ccmp.rx_pn[i];
p += scnprintf(p, sizeof(buf)+buf-p,
"%02x%02x%02x%02x%02x%02x\n",
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 576e024..76839d4 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -121,28 +121,25 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, sizeof(buf) + buf - p,
- "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
+ "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
for (i = 0; i < STA_TID_NUM; i++) {
p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
- sta->ampdu_mlme.tid_active_rx[i]);
+ !!sta->ampdu_mlme.tid_rx[i]);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
- sta->ampdu_mlme.tid_active_rx[i] ?
+ sta->ampdu_mlme.tid_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
- sta->ampdu_mlme.tid_active_rx[i] ?
+ sta->ampdu_mlme.tid_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->ssn : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
- sta->ampdu_mlme.tid_state_tx[i]);
+ !!sta->ampdu_mlme.tid_tx[i]);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
- sta->ampdu_mlme.tid_state_tx[i] ?
+ sta->ampdu_mlme.tid_tx[i] ?
sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
- p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
- sta->ampdu_mlme.tid_state_tx[i] ?
- sta->ampdu_mlme.tid_tx[i]->ssn : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
- sta->ampdu_mlme.tid_state_tx[i] ?
+ sta->ampdu_mlme.tid_tx[i] ?
skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\n");
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 7d18a32..14123dc 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -16,10 +16,11 @@ static inline int drv_start(struct ieee80211_local *local)
might_sleep();
+ trace_drv_start(local);
local->started = true;
smp_mb();
ret = local->ops->start(&local->hw);
- trace_drv_start(local, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -27,8 +28,9 @@ static inline void drv_stop(struct ieee80211_local *local)
{
might_sleep();
- local->ops->stop(&local->hw);
trace_drv_stop(local);
+ local->ops->stop(&local->hw);
+ trace_drv_return_void(local);
/* sync away all work on the tasklet before clearing started */
tasklet_disable(&local->tasklet);
@@ -46,8 +48,9 @@ static inline int drv_add_interface(struct ieee80211_local *local,
might_sleep();
+ trace_drv_add_interface(local, vif_to_sdata(vif));
ret = local->ops->add_interface(&local->hw, vif);
- trace_drv_add_interface(local, vif_to_sdata(vif), ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -56,8 +59,9 @@ static inline void drv_remove_interface(struct ieee80211_local *local,
{
might_sleep();
- local->ops->remove_interface(&local->hw, vif);
trace_drv_remove_interface(local, vif_to_sdata(vif));
+ local->ops->remove_interface(&local->hw, vif);
+ trace_drv_return_void(local);
}
static inline int drv_config(struct ieee80211_local *local, u32 changed)
@@ -66,8 +70,9 @@ static inline int drv_config(struct ieee80211_local *local, u32 changed)
might_sleep();
+ trace_drv_config(local, changed);
ret = local->ops->config(&local->hw, changed);
- trace_drv_config(local, changed, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -78,26 +83,10 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
{
might_sleep();
+ trace_drv_bss_info_changed(local, sdata, info, changed);
if (local->ops->bss_info_changed)
local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
- trace_drv_bss_info_changed(local, sdata, info, changed);
-}
-
-struct in_ifaddr;
-static inline int drv_configure_arp_filter(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
- struct in_ifaddr *ifa_list)
-{
- int ret = 0;
-
- might_sleep();
-
- if (local->ops->configure_arp_filter)
- ret = local->ops->configure_arp_filter(&local->hw, vif,
- ifa_list);
-
- trace_drv_configure_arp_filter(local, vif_to_sdata(vif), ifa_list, ret);
- return ret;
+ trace_drv_return_void(local);
}
static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -105,10 +94,12 @@ static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
{
u64 ret = 0;
+ trace_drv_prepare_multicast(local, mc_list->count);
+
if (local->ops->prepare_multicast)
ret = local->ops->prepare_multicast(&local->hw, mc_list);
- trace_drv_prepare_multicast(local, mc_list->count, ret);
+ trace_drv_return_u64(local, ret);
return ret;
}
@@ -120,19 +111,21 @@ static inline void drv_configure_filter(struct ieee80211_local *local,
{
might_sleep();
- local->ops->configure_filter(&local->hw, changed_flags, total_flags,
- multicast);
trace_drv_configure_filter(local, changed_flags, total_flags,
multicast);
+ local->ops->configure_filter(&local->hw, changed_flags, total_flags,
+ multicast);
+ trace_drv_return_void(local);
}
static inline int drv_set_tim(struct ieee80211_local *local,
struct ieee80211_sta *sta, bool set)
{
int ret = 0;
+ trace_drv_set_tim(local, sta, set);
if (local->ops->set_tim)
ret = local->ops->set_tim(&local->hw, sta, set);
- trace_drv_set_tim(local, sta, set, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -146,8 +139,9 @@ static inline int drv_set_key(struct ieee80211_local *local,
might_sleep();
+ trace_drv_set_key(local, cmd, sdata, sta, key);
ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
- trace_drv_set_key(local, cmd, sdata, sta, key, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -162,10 +156,11 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
if (sta)
ista = &sta->sta;
+ trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
if (local->ops->update_tkip_key)
local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
ista, iv32, phase1key);
- trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
+ trace_drv_return_void(local);
}
static inline int drv_hw_scan(struct ieee80211_local *local,
@@ -176,8 +171,9 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
might_sleep();
+ trace_drv_hw_scan(local, sdata, req);
ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
- trace_drv_hw_scan(local, sdata, req, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -185,18 +181,20 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
{
might_sleep();
+ trace_drv_sw_scan_start(local);
if (local->ops->sw_scan_start)
local->ops->sw_scan_start(&local->hw);
- trace_drv_sw_scan_start(local);
+ trace_drv_return_void(local);
}
static inline void drv_sw_scan_complete(struct ieee80211_local *local)
{
might_sleep();
+ trace_drv_sw_scan_complete(local);
if (local->ops->sw_scan_complete)
local->ops->sw_scan_complete(&local->hw);
- trace_drv_sw_scan_complete(local);
+ trace_drv_return_void(local);
}
static inline int drv_get_stats(struct ieee80211_local *local,
@@ -228,9 +226,10 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
might_sleep();
+ trace_drv_set_rts_threshold(local, value);
if (local->ops->set_rts_threshold)
ret = local->ops->set_rts_threshold(&local->hw, value);
- trace_drv_set_rts_threshold(local, value, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -240,12 +239,13 @@ static inline int drv_set_coverage_class(struct ieee80211_local *local,
int ret = 0;
might_sleep();
+ trace_drv_set_coverage_class(local, value);
if (local->ops->set_coverage_class)
local->ops->set_coverage_class(&local->hw, value);
else
ret = -EOPNOTSUPP;
- trace_drv_set_coverage_class(local, value, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -254,9 +254,10 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
+ trace_drv_sta_notify(local, sdata, cmd, sta);
if (local->ops->sta_notify)
local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
- trace_drv_sta_notify(local, sdata, cmd, sta);
+ trace_drv_return_void(local);
}
static inline int drv_sta_add(struct ieee80211_local *local,
@@ -267,10 +268,11 @@ static inline int drv_sta_add(struct ieee80211_local *local,
might_sleep();
+ trace_drv_sta_add(local, sdata, sta);
if (local->ops->sta_add)
ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
- trace_drv_sta_add(local, sdata, sta, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -281,10 +283,11 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
{
might_sleep();
+ trace_drv_sta_remove(local, sdata, sta);
if (local->ops->sta_remove)
local->ops->sta_remove(&local->hw, &sdata->vif, sta);
- trace_drv_sta_remove(local, sdata, sta);
+ trace_drv_return_void(local);
}
static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
@@ -294,9 +297,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
might_sleep();
+ trace_drv_conf_tx(local, queue, params);
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, queue, params);
- trace_drv_conf_tx(local, queue, params, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -306,9 +310,10 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
might_sleep();
+ trace_drv_get_tsf(local);
if (local->ops->get_tsf)
ret = local->ops->get_tsf(&local->hw);
- trace_drv_get_tsf(local, ret);
+ trace_drv_return_u64(local, ret);
return ret;
}
@@ -316,18 +321,20 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
{
might_sleep();
+ trace_drv_set_tsf(local, tsf);
if (local->ops->set_tsf)
local->ops->set_tsf(&local->hw, tsf);
- trace_drv_set_tsf(local, tsf);
+ trace_drv_return_void(local);
}
static inline void drv_reset_tsf(struct ieee80211_local *local)
{
might_sleep();
+ trace_drv_reset_tsf(local);
if (local->ops->reset_tsf)
local->ops->reset_tsf(&local->hw);
- trace_drv_reset_tsf(local);
+ trace_drv_return_void(local);
}
static inline int drv_tx_last_beacon(struct ieee80211_local *local)
@@ -336,9 +343,10 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
might_sleep();
+ trace_drv_tx_last_beacon(local);
if (local->ops->tx_last_beacon)
ret = local->ops->tx_last_beacon(&local->hw);
- trace_drv_tx_last_beacon(local, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -349,10 +357,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
u16 *ssn)
{
int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn);
+
if (local->ops->ampdu_action)
ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
sta, tid, ssn);
- trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
+
+ trace_drv_return_int(local, ret);
+
return ret;
}
@@ -360,9 +375,14 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
struct survey_info *survey)
{
int ret = -EOPNOTSUPP;
+
+ trace_drv_get_survey(local, idx, survey);
+
if (local->ops->get_survey)
ret = local->ops->get_survey(&local->hw, idx, survey);
- /* trace_drv_get_survey(local, idx, survey, ret); */
+
+ trace_drv_return_int(local, ret);
+
return ret;
}
@@ -381,6 +401,7 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop)
trace_drv_flush(local, drop);
if (local->ops->flush)
local->ops->flush(&local->hw, drop);
+ trace_drv_return_void(local);
}
static inline void drv_channel_switch(struct ieee80211_local *local,
@@ -388,9 +409,9 @@ static inline void drv_channel_switch(struct ieee80211_local *local,
{
might_sleep();
- local->ops->channel_switch(&local->hw, ch_switch);
-
trace_drv_channel_switch(local, ch_switch);
+ local->ops->channel_switch(&local->hw, ch_switch);
+ trace_drv_return_void(local);
}
#endif /* __MAC80211_DRIVER_OPS */
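[editor's note] All of the driver-ops.h hunks above converge on one shape: trace the callback entry with its arguments, invoke the driver op, then record the outcome through a shared drv_return_* event. A minimal sketch of that shape with illustrative names (drv_example and trace_drv_example are not part of the patch; the matching entry-only event is sketched after the driver-trace.h hunks below):

static inline int drv_example(struct ieee80211_local *local, u32 value)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	trace_drv_example(local, value);	/* entry + arguments */
	if (local->ops->example)
		ret = local->ops->example(&local->hw, value);
	trace_drv_return_int(local, ret);	/* outcome via shared event */

	return ret;
}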
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6b906301..5d5d2a9 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -36,20 +36,58 @@ static inline void trace_ ## name(proto) {}
* Tracing for driver callbacks.
*/
-TRACE_EVENT(drv_start,
- TP_PROTO(struct ieee80211_local *local, int ret),
+TRACE_EVENT(drv_return_void,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+ TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
+);
+TRACE_EVENT(drv_return_int,
+ TP_PROTO(struct ieee80211_local *local, int ret),
TP_ARGS(local, ret),
-
TP_STRUCT__entry(
LOCAL_ENTRY
__field(int, ret)
),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ ),
+ TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret)
+);
+TRACE_EVENT(drv_return_u64,
+ TP_PROTO(struct ieee80211_local *local, u64 ret),
+ TP_ARGS(local, ret),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u64, ret)
+ ),
TP_fast_assign(
LOCAL_ASSIGN;
__entry->ret = ret;
),
+ TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret)
+);
+
+TRACE_EVENT(drv_start,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
TP_printk(
LOCAL_PR_FMT, LOCAL_PR_ARG
@@ -76,28 +114,25 @@ TRACE_EVENT(drv_stop,
TRACE_EVENT(drv_add_interface,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- int ret),
+ struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, sdata, ret),
+ TP_ARGS(local, sdata),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
__array(char, addr, 6)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
memcpy(__entry->addr, sdata->vif.addr, 6);
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
)
);
@@ -126,15 +161,13 @@ TRACE_EVENT(drv_remove_interface,
TRACE_EVENT(drv_config,
TP_PROTO(struct ieee80211_local *local,
- u32 changed,
- int ret),
+ u32 changed),
- TP_ARGS(local, changed, ret),
+ TP_ARGS(local, changed),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(u32, changed)
- __field(int, ret)
__field(u32, flags)
__field(int, power_level)
__field(int, dynamic_ps_timeout)
@@ -150,7 +183,6 @@ TRACE_EVENT(drv_config,
TP_fast_assign(
LOCAL_ASSIGN;
__entry->changed = changed;
- __entry->ret = ret;
__entry->flags = local->hw.conf.flags;
__entry->power_level = local->hw.conf.power_level;
__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
@@ -164,8 +196,8 @@ TRACE_EVENT(drv_config,
),
TP_printk(
- LOCAL_PR_FMT " ch:%#x freq:%d ret:%d",
- LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret
+ LOCAL_PR_FMT " ch:%#x freq:%d",
+ LOCAL_PR_ARG, __entry->changed, __entry->center_freq
)
);
@@ -219,52 +251,24 @@ TRACE_EVENT(drv_bss_info_changed,
)
);
-TRACE_EVENT(drv_configure_arp_filter,
- TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct in_ifaddr *ifa_list, int ret),
-
- TP_ARGS(local, sdata, ifa_list, ret),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- VIF_ENTRY
- __field(int, ret)
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- VIF_ASSIGN;
- __entry->ret = ret;
- ),
-
- TP_printk(
- VIF_PR_FMT LOCAL_PR_FMT " ret:%d",
- VIF_PR_ARG, LOCAL_PR_ARG, __entry->ret
- )
-);
-
TRACE_EVENT(drv_prepare_multicast,
- TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret),
+ TP_PROTO(struct ieee80211_local *local, int mc_count),
- TP_ARGS(local, mc_count, ret),
+ TP_ARGS(local, mc_count),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(int, mc_count)
- __field(u64, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
__entry->mc_count = mc_count;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " prepare mc (%d): %llx",
- LOCAL_PR_ARG, __entry->mc_count,
- (unsigned long long) __entry->ret
+ LOCAL_PR_FMT " prepare mc (%d)",
+ LOCAL_PR_ARG, __entry->mc_count
)
);
@@ -298,27 +302,25 @@ TRACE_EVENT(drv_configure_filter,
TRACE_EVENT(drv_set_tim,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sta *sta, bool set, int ret),
+ struct ieee80211_sta *sta, bool set),
- TP_ARGS(local, sta, set, ret),
+ TP_ARGS(local, sta, set),
TP_STRUCT__entry(
LOCAL_ENTRY
STA_ENTRY
__field(bool, set)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
STA_ASSIGN;
__entry->set = set;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d",
- LOCAL_PR_ARG, STA_PR_FMT, __entry->set, __entry->ret
+ LOCAL_PR_FMT STA_PR_FMT " set:%d",
+ LOCAL_PR_ARG, STA_PR_FMT, __entry->set
)
);
@@ -326,9 +328,9 @@ TRACE_EVENT(drv_set_key,
TP_PROTO(struct ieee80211_local *local,
enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key, int ret),
+ struct ieee80211_key_conf *key),
- TP_ARGS(local, cmd, sdata, sta, key, ret),
+ TP_ARGS(local, cmd, sdata, sta, key),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -338,7 +340,6 @@ TRACE_EVENT(drv_set_key,
__field(u8, hw_key_idx)
__field(u8, flags)
__field(s8, keyidx)
- __field(int, ret)
),
TP_fast_assign(
@@ -349,12 +350,11 @@ TRACE_EVENT(drv_set_key,
__entry->flags = key->flags;
__entry->keyidx = key->keyidx;
__entry->hw_key_idx = key->hw_key_idx;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
)
);
@@ -389,25 +389,23 @@ TRACE_EVENT(drv_update_tkip_key,
TRACE_EVENT(drv_hw_scan,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- struct cfg80211_scan_request *req, int ret),
+ struct cfg80211_scan_request *req),
- TP_ARGS(local, sdata, req, ret),
+ TP_ARGS(local, sdata, req),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " ret:%d",
- LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT,
+ LOCAL_PR_ARG,VIF_PR_ARG
)
);
@@ -504,48 +502,44 @@ TRACE_EVENT(drv_get_tkip_seq,
);
TRACE_EVENT(drv_set_rts_threshold,
- TP_PROTO(struct ieee80211_local *local, u32 value, int ret),
+ TP_PROTO(struct ieee80211_local *local, u32 value),
- TP_ARGS(local, value, ret),
+ TP_ARGS(local, value),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(u32, value)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->ret = ret;
__entry->value = value;
),
TP_printk(
- LOCAL_PR_FMT " value:%d ret:%d",
- LOCAL_PR_ARG, __entry->value, __entry->ret
+ LOCAL_PR_FMT " value:%d",
+ LOCAL_PR_ARG, __entry->value
)
);
TRACE_EVENT(drv_set_coverage_class,
- TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
+ TP_PROTO(struct ieee80211_local *local, u8 value),
- TP_ARGS(local, value, ret),
+ TP_ARGS(local, value),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(u8, value)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->ret = ret;
__entry->value = value;
),
TP_printk(
- LOCAL_PR_FMT " value:%d ret:%d",
- LOCAL_PR_ARG, __entry->value, __entry->ret
+ LOCAL_PR_FMT " value:%d",
+ LOCAL_PR_ARG, __entry->value
)
);
@@ -580,27 +574,25 @@ TRACE_EVENT(drv_sta_notify,
TRACE_EVENT(drv_sta_add,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta, int ret),
+ struct ieee80211_sta *sta),
- TP_ARGS(local, sdata, sta, ret),
+ TP_ARGS(local, sdata, sta),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
STA_ENTRY
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
STA_ASSIGN;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
)
);
@@ -631,10 +623,9 @@ TRACE_EVENT(drv_sta_remove,
TRACE_EVENT(drv_conf_tx,
TP_PROTO(struct ieee80211_local *local, u16 queue,
- const struct ieee80211_tx_queue_params *params,
- int ret),
+ const struct ieee80211_tx_queue_params *params),
- TP_ARGS(local, queue, params, ret),
+ TP_ARGS(local, queue, params),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -643,13 +634,11 @@ TRACE_EVENT(drv_conf_tx,
__field(u16, cw_min)
__field(u16, cw_max)
__field(u8, aifs)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
__entry->queue = queue;
- __entry->ret = ret;
__entry->txop = params->txop;
__entry->cw_max = params->cw_max;
__entry->cw_min = params->cw_min;
@@ -657,29 +646,27 @@ TRACE_EVENT(drv_conf_tx,
),
TP_printk(
- LOCAL_PR_FMT " queue:%d ret:%d",
- LOCAL_PR_ARG, __entry->queue, __entry->ret
+ LOCAL_PR_FMT " queue:%d",
+ LOCAL_PR_ARG, __entry->queue
)
);
TRACE_EVENT(drv_get_tsf,
- TP_PROTO(struct ieee80211_local *local, u64 ret),
+ TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local, ret),
+ TP_ARGS(local),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(u64, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " ret:%llu",
- LOCAL_PR_ARG, (unsigned long long)__entry->ret
+ LOCAL_PR_FMT,
+ LOCAL_PR_ARG
)
);
@@ -723,23 +710,21 @@ TRACE_EVENT(drv_reset_tsf,
);
TRACE_EVENT(drv_tx_last_beacon,
- TP_PROTO(struct ieee80211_local *local, int ret),
+ TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local, ret),
+ TP_ARGS(local),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " ret:%d",
- LOCAL_PR_ARG, __entry->ret
+ LOCAL_PR_FMT,
+ LOCAL_PR_ARG
)
);
@@ -748,9 +733,9 @@ TRACE_EVENT(drv_ampdu_action,
struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, int ret),
+ u16 *ssn),
- TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
+ TP_ARGS(local, sdata, action, sta, tid, ssn),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -758,7 +743,6 @@ TRACE_EVENT(drv_ampdu_action,
__field(u32, action)
__field(u16, tid)
__field(u16, ssn)
- __field(int, ret)
VIF_ENTRY
),
@@ -766,15 +750,36 @@ TRACE_EVENT(drv_ampdu_action,
LOCAL_ASSIGN;
VIF_ASSIGN;
STA_ASSIGN;
- __entry->ret = ret;
__entry->action = action;
__entry->tid = tid;
__entry->ssn = ssn ? *ssn : 0;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
+ )
+);
+
+TRACE_EVENT(drv_get_survey,
+ TP_PROTO(struct ieee80211_local *local, int idx,
+ struct survey_info *survey),
+
+ TP_ARGS(local, idx, survey),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, idx)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->idx = idx;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " idx:%d",
+ LOCAL_PR_ARG, __entry->idx
)
);
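[editor's note] After this conversion every per-callback event only records the call arguments; results are reported by the drv_return_* events added at the top of the file. An entry-only event of the same shape, pairing with the drv_example wrapper sketched earlier (event name and field are illustrative only):

TRACE_EVENT(drv_example,
	TP_PROTO(struct ieee80211_local *local, u32 value),
	TP_ARGS(local, value),
	TP_STRUCT__entry(
		LOCAL_ENTRY
		__field(u32, value)
	),
	TP_fast_assign(
		LOCAL_ASSIGN;
		__entry->value = value;
	),
	TP_printk(LOCAL_PR_FMT " value:%u", LOCAL_PR_ARG, __entry->value)
);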
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 2ab106a..be928ef 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -6,7 +6,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2008, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -105,6 +105,8 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
{
int i;
+ cancel_work_sync(&sta->ampdu_mlme.work);
+
for (i = 0; i < STA_TID_NUM; i++) {
__ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR);
__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -112,6 +114,43 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
}
}
+void ieee80211_ba_session_work(struct work_struct *work)
+{
+ struct sta_info *sta =
+ container_of(work, struct sta_info, ampdu_mlme.work);
+ struct tid_ampdu_tx *tid_tx;
+ int tid;
+
+ /*
+ * When this flag is set, new sessions should be
+ * blocked, and existing sessions will be torn
+ * down by the code that set the flag, so this
+ * need not run.
+ */
+ if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+ return;
+
+ mutex_lock(&sta->ampdu_mlme.mtx);
+ for (tid = 0; tid < STA_TID_NUM; tid++) {
+ if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
+ ___ieee80211_stop_rx_ba_session(
+ sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_TIMEOUT);
+
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
+ if (!tid_tx)
+ continue;
+
+ if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
+ ieee80211_tx_ba_session_handle_start(sta, tid);
+ else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
+ &tid_tx->state))
+ ___ieee80211_stop_tx_ba_session(sta, tid,
+ WLAN_BACK_INITIATOR);
+ }
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+}
+
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code)
@@ -176,13 +215,8 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
if (initiator == WLAN_BACK_INITIATOR)
__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
- else { /* WLAN_BACK_RECIPIENT */
- spin_lock_bh(&sta->lock);
- if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
- ___ieee80211_stop_tx_ba_session(sta, tid,
- WLAN_BACK_RECIPIENT);
- spin_unlock_bh(&sta->lock);
- }
+ else
+ __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT);
}
int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
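[editor's note] ieee80211_ba_session_work() above only consumes state; a hedged sketch of what a producer elsewhere in the series is expected to do, assuming a per-TID request bit is set from timer or RX context and the station's A-MPDU work is then scheduled (the helper name is illustrative):

static void example_rx_agg_timeout(struct sta_info *sta, int tid)
{
	/* request teardown of the RX session for this TID ... */
	set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
	/* ... and let ieee80211_ba_session_work() act on it */
	ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}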
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index d7a96ce..d4e84b2 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -172,11 +172,13 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
rcu_assign_pointer(ifibss->presp, skb);
sdata->vif.bss_conf.beacon_int = beacon_int;
+ sdata->vif.bss_conf.basic_rates = basic_rates;
bss_change = BSS_CHANGED_BEACON_INT;
bss_change |= ieee80211_reset_erp_info(sdata);
bss_change |= BSS_CHANGED_BSSID;
bss_change |= BSS_CHANGED_BEACON;
bss_change |= BSS_CHANGED_BEACON_ENABLED;
+ bss_change |= BSS_CHANGED_BASIC_RATES;
bss_change |= BSS_CHANGED_IBSS;
sdata->vif.bss_conf.ibss_joined = true;
ieee80211_bss_info_change_notify(sdata, bss_change);
@@ -529,7 +531,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
sdata->drop_unencrypted = 0;
__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
- ifibss->channel, 3, /* first two are basic */
+ ifibss->channel, ifibss->basic_rates,
capability, 0);
}
@@ -727,8 +729,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
}
-static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
@@ -754,33 +756,11 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len);
break;
}
-
- kfree_skb(skb);
}
-static void ieee80211_ibss_work(struct work_struct *work)
+void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data, u.ibss.work);
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_ibss *ifibss;
- struct sk_buff *skb;
-
- if (WARN_ON(local->suspended))
- return;
-
- if (!ieee80211_sdata_running(sdata))
- return;
-
- if (local->scanning)
- return;
-
- if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC))
- return;
- ifibss = &sdata->u.ibss;
-
- while ((skb = skb_dequeue(&ifibss->skb_queue)))
- ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request))
return;
@@ -804,7 +784,7 @@ static void ieee80211_queue_ibss_work(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
- ieee80211_queue_work(&local->hw, &ifibss->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
static void ieee80211_ibss_timer(unsigned long data)
@@ -827,7 +807,6 @@ void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- cancel_work_sync(&ifibss->work);
if (del_timer_sync(&ifibss->timer))
ifibss->timer_running = true;
}
@@ -847,10 +826,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- INIT_WORK(&ifibss->work, ieee80211_ibss_work);
setup_timer(&ifibss->timer, ieee80211_ibss_timer,
(unsigned long) sdata);
- skb_queue_head_init(&ifibss->skb_queue);
}
/* scan finished notification */
@@ -872,32 +849,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
mutex_unlock(&local->iflist_mtx);
}
-ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_mgmt *mgmt;
- u16 fc;
-
- if (skb->len < 24)
- return RX_DROP_MONITOR;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = le16_to_cpu(mgmt->frame_control);
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PROBE_RESP:
- case IEEE80211_STYPE_BEACON:
- case IEEE80211_STYPE_PROBE_REQ:
- case IEEE80211_STYPE_AUTH:
- skb_queue_tail(&sdata->u.ibss.skb_queue, skb);
- ieee80211_queue_work(&local->hw, &sdata->u.ibss.work);
- return RX_QUEUED;
- }
-
- return RX_DROP_MONITOR;
-}
-
int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params)
{
@@ -910,6 +861,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.fixed_bssid = false;
sdata->u.ibss.privacy = params->privacy;
+ sdata->u.ibss.basic_rates = params->basic_rates;
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
@@ -957,7 +909,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(sdata->local);
set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
return 0;
}
@@ -965,10 +917,35 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
{
struct sk_buff *skb;
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_local *local = sdata->local;
+ struct cfg80211_bss *cbss;
+ u16 capability;
+ int active_ibss = 0;
+
+ active_ibss = ieee80211_sta_active_ibss(sdata);
+
+ if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+ capability = WLAN_CAPABILITY_IBSS;
+
+ if (ifibss->privacy)
+ capability |= WLAN_CAPABILITY_PRIVACY;
+
+ cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel,
+ ifibss->bssid, ifibss->ssid,
+ ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+ WLAN_CAPABILITY_PRIVACY,
+ capability);
+
+ if (cbss) {
+ cfg80211_unlink_bss(local->hw.wiphy, cbss);
+ cfg80211_put_bss(cbss);
+ }
+ }
del_timer_sync(&sdata->u.ibss.timer);
clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
- cancel_work_sync(&sdata->u.ibss.work);
+ cancel_work_sync(&sdata->work);
clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
sta_info_flush(sdata->local, sdata);
@@ -983,7 +960,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
synchronize_rcu();
kfree_skb(skb);
- skb_queue_purge(&sdata->u.ibss.skb_queue);
+ skb_queue_purge(&sdata->skb_queue);
memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
sdata->u.ibss.ssid_len = 0;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 1e779e8..a3649a8 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -325,7 +325,6 @@ struct ieee80211_if_managed {
struct timer_list conn_mon_timer;
struct timer_list bcn_mon_timer;
struct timer_list chswitch_timer;
- struct work_struct work;
struct work_struct monitor_work;
struct work_struct chswitch_work;
struct work_struct beacon_connection_loss_work;
@@ -340,8 +339,6 @@ struct ieee80211_if_managed {
u16 aid;
- struct sk_buff_head skb_queue;
-
unsigned long timers_running; /* used for quiesce/restart */
bool powersave; /* powersave requested for this iface */
enum ieee80211_smps_mode req_smps, /* requested smps mode */
@@ -386,13 +383,12 @@ enum ieee80211_ibss_request {
struct ieee80211_if_ibss {
struct timer_list timer;
- struct work_struct work;
-
- struct sk_buff_head skb_queue;
unsigned long request;
unsigned long last_scan_completed;
+ u32 basic_rates;
+
bool timer_running;
bool fixed_bssid;
@@ -416,11 +412,9 @@ struct ieee80211_if_ibss {
};
struct ieee80211_if_mesh {
- struct work_struct work;
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
struct timer_list mesh_path_root_timer;
- struct sk_buff_head skb_queue;
unsigned long timers_running;
@@ -517,6 +511,11 @@ struct ieee80211_sub_if_data {
u16 sequence_number;
+ struct work_struct work;
+ struct sk_buff_head skb_queue;
+
+ bool arp_filter_state;
+
/*
* AP this belongs to: self in AP mode and
* corresponding AP in VLAN mode, NULL for
@@ -569,11 +568,15 @@ ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata,
#endif
}
+enum sdata_queue_type {
+ IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
+ IEEE80211_SDATA_QUEUE_AGG_START = 1,
+ IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
+};
+
enum {
IEEE80211_RX_MSG = 1,
IEEE80211_TX_STATUS_MSG = 2,
- IEEE80211_DELBA_MSG = 3,
- IEEE80211_ADDBA_MSG = 4,
};
enum queue_stop_reason {
@@ -724,13 +727,7 @@ struct ieee80211_local {
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
struct tasklet_struct tx_pending_tasklet;
- /*
- * This lock is used to prevent concurrent A-MPDU
- * session start/stop processing, this thus also
- * synchronises the ->ampdu_action() callback to
- * drivers and limits it to one at a time.
- */
- spinlock_t ampdu_lock;
+ atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
/* number of interfaces with corresponding IFF_ flags */
atomic_t iff_allmultis, iff_promiscs;
@@ -853,6 +850,14 @@ struct ieee80211_local {
struct notifier_block network_latency_notifier;
struct notifier_block ifa_notifier;
+ /*
+ * The dynamic ps timeout configured from user space via WEXT -
+ * this will override whatever chosen by mac80211 internally.
+ */
+ int dynamic_ps_forced_timeout;
+ int dynamic_ps_user_timeout;
+ bool disable_dynamic_ps;
+
int user_power_level; /* in dBm */
int power_constr_level; /* in dBm */
@@ -875,9 +880,8 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
return netdev_priv(dev);
}
-/* this struct represents 802.11n's RA/TID combination along with our vif */
+/* this struct represents 802.11n's RA/TID combination */
struct ieee80211_ra_tid {
- struct ieee80211_vif *vif;
u8 ra[ETH_ALEN];
u16 tid;
};
@@ -986,13 +990,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_disassoc_request *req,
void *cookie);
-int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- bool channel_type_valid,
- const u8 *buf, size_t len, u64 *cookie);
-ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb);
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
@@ -1005,12 +1002,13 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
u64 timestamp);
void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
-ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
u8 *bssid, u8 *addr, u32 supp_rates,
gfp_t gfp);
@@ -1019,6 +1017,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
+
+/* mesh code */
+void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
/* scan/BSS handling */
void ieee80211_scan_work(struct work_struct *work);
@@ -1102,6 +1108,8 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps, const u8 *da,
const u8 *bssid);
+void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason);
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason);
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
@@ -1121,6 +1129,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator);
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator);
+void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
+void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_ba_session_work(struct work_struct *work);
+void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 1afa9ec..910729f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -339,7 +339,6 @@ static int ieee80211_stop(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
unsigned long flags;
struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
@@ -356,18 +355,6 @@ static int ieee80211_stop(struct net_device *dev)
ieee80211_work_purge(sdata);
/*
- * Now delete all active aggregation sessions.
- */
- rcu_read_lock();
-
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
- if (sta->sdata == sdata)
- ieee80211_sta_tear_down_BA_sessions(sta);
- }
-
- rcu_read_unlock();
-
- /*
* Remove all stations associated with this interface.
*
* This must be done before calling ops->remove_interface()
@@ -473,27 +460,14 @@ static int ieee80211_stop(struct net_device *dev)
* whether the interface is running, which, at this point,
* it no longer is.
*/
- cancel_work_sync(&sdata->u.mgd.work);
cancel_work_sync(&sdata->u.mgd.chswitch_work);
cancel_work_sync(&sdata->u.mgd.monitor_work);
cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
- /*
- * When we get here, the interface is marked down.
- * Call synchronize_rcu() to wait for the RX path
- * should it be using the interface and enqueuing
- * frames at this very time on another CPU.
- */
- synchronize_rcu();
- skb_queue_purge(&sdata->u.mgd.skb_queue);
/* fall through */
case NL80211_IFTYPE_ADHOC:
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
del_timer_sync(&sdata->u.ibss.timer);
- cancel_work_sync(&sdata->u.ibss.work);
- synchronize_rcu();
- skb_queue_purge(&sdata->u.ibss.skb_queue);
- }
/* fall through */
case NL80211_IFTYPE_MESH_POINT:
if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -508,6 +482,16 @@ static int ieee80211_stop(struct net_device *dev)
}
/* fall through */
default:
+ flush_work(&sdata->work);
+ /*
+ * When we get here, the interface is marked down.
+ * Call synchronize_rcu() to wait for the RX path
+ * should it be using the interface and enqueuing
+ * frames at this very time on another CPU.
+ */
+ synchronize_rcu();
+ skb_queue_purge(&sdata->skb_queue);
+
if (local->scan_sdata == sdata)
ieee80211_scan_cancel(local);
@@ -717,6 +701,136 @@ static void ieee80211_if_setup(struct net_device *dev)
dev->destructor = free_netdev;
}
+static void ieee80211_iface_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data, work);
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct sta_info *sta;
+ struct ieee80211_ra_tid *ra_tid;
+
+ if (!ieee80211_sdata_running(sdata))
+ return;
+
+ if (local->scanning)
+ return;
+
+ /*
+ * ieee80211_queue_work() should have picked up most cases,
+ * here we'll pick the rest.
+ */
+ if (WARN(local->suspended,
+ "interface work scheduled while going to suspend\n"))
+ return;
+
+ /* first process frames */
+ while ((skb = skb_dequeue(&sdata->skb_queue))) {
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
+ ra_tid = (void *)&skb->cb;
+ ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
+ ra_tid->tid);
+ } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
+ ra_tid = (void *)&skb->cb;
+ ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
+ ra_tid->tid);
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK) {
+ int len = skb->len;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (sta) {
+ switch (mgmt->u.action.u.addba_req.action_code) {
+ case WLAN_ACTION_ADDBA_REQ:
+ ieee80211_process_addba_request(
+ local, sta, mgmt, len);
+ break;
+ case WLAN_ACTION_ADDBA_RESP:
+ ieee80211_process_addba_resp(local, sta,
+ mgmt, len);
+ break;
+ case WLAN_ACTION_DELBA:
+ ieee80211_process_delba(sdata, sta,
+ mgmt, len);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ }
+ mutex_unlock(&local->sta_mtx);
+ } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *)mgmt;
+ /*
+ * So the frame isn't mgmt, but frame_control
+ * is at the right place anyway, of course, so
+ * the if statement is correct.
+ *
+ * Warn if we have other data frame types here,
+ * they must not get here.
+ */
+ WARN_ON(hdr->frame_control &
+ cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
+ WARN_ON(!(hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)));
+ /*
+ * This was a fragment of a frame, received while
+ * a block-ack session was active. That cannot be
+ * right, so terminate the session.
+ */
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (sta) {
+ u16 tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TID_MASK;
+
+ __ieee80211_stop_rx_ba_session(
+ sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP);
+ }
+ mutex_unlock(&local->sta_mtx);
+ } else switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ieee80211_sta_rx_queued_mgmt(sdata, skb);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ ieee80211_mesh_rx_queued_mgmt(sdata, skb);
+ break;
+ default:
+ WARN(1, "frame for unexpected interface type");
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+
+ /* then other type-dependent work */
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ieee80211_sta_work(sdata);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ieee80211_ibss_work(sdata);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ ieee80211_mesh_work(sdata);
+ break;
+ default:
+ break;
+ }
+}
+
+
/*
* Helper function to initialise an interface to a specific type.
*/
@@ -734,6 +848,9 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
/* only monitor differs */
sdata->dev->type = ARPHRD_ETHER;
+ skb_queue_head_init(&sdata->skb_queue);
+ INIT_WORK(&sdata->work, ieee80211_iface_work);
+
switch (type) {
case NL80211_IFTYPE_AP:
skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
@@ -959,6 +1076,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->wdev.wiphy = local->hw.wiphy;
sdata->local = local;
sdata->dev = ndev;
+#ifdef CONFIG_INET
+ sdata->arp_filter_state = true;
+#endif
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
skb_queue_head_init(&sdata->fragments[i].skb_list);
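[editor's note] ieee80211_iface_work() above dispatches on skb->pkt_type for the aggregation start/stop callbacks; a hedged sketch of the producer side implied by that: a zero-length skb is tagged with one of the IEEE80211_SDATA_QUEUE_AGG_* types, the RA/TID is stashed in skb->cb, and the frame is queued on the per-interface queue (helper name illustrative, exact helpers elsewhere in the series may differ):

static void example_queue_agg_event(struct ieee80211_sub_if_data *sdata,
				    const u8 *ra, u16 tid, int type)
{
	struct sk_buff *skb = dev_alloc_skb(0);
	struct ieee80211_ra_tid *ra_tid;

	if (!skb)
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = type;	/* IEEE80211_SDATA_QUEUE_AGG_START or _STOP */
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}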
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index d0d9001..50d1cff 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -273,7 +273,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
key->conf.iv_len = CCMP_HDR_LEN;
key->conf.icv_len = CCMP_MIC_LEN;
if (seq) {
- for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
+ for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++)
for (j = 0; j < CCMP_PN_LEN; j++)
key->u.ccmp.rx_pn[i][j] =
seq[CCMP_PN_LEN - j - 1];
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 9996e3b..a3849fa 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -77,7 +77,13 @@ struct ieee80211_key {
} tkip;
struct {
u8 tx_pn[6];
- u8 rx_pn[NUM_RX_DATA_QUEUES][6];
+ /*
+ * Last received packet number. The first
+ * NUM_RX_DATA_QUEUES counters are used with Data
+ * frames and the last counter is used with Robust
+ * Management frames.
+ */
+ u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6];
struct crypto_cipher *tfm;
u32 replays; /* dot11RSNAStatsCCMPReplays */
/* scratch buffers for virt_to_page() (crypto API) */
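[editor's note] The comment above explains why the replay-counter array gained one extra slot; a small sketch of how a receive path might pick the counter index, assuming data frames keep their RX queue number and robust management frames use the final slot (helper and variable names are illustrative):

static u8 example_ccmp_pn_queue(__le16 frame_control, u8 rx_queue)
{
	if (ieee80211_is_mgmt(frame_control))
		return NUM_RX_DATA_QUEUES;	/* the extra "+ 1" slot */
	return rx_queue;			/* per-queue data counter */
}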
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 88b671a..edf7aff 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -20,6 +20,7 @@
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <linux/pm_qos_params.h>
+#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
@@ -259,7 +260,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
struct sk_buff *skb;
- struct ieee80211_ra_tid *ra_tid;
while ((skb = skb_dequeue(&local->skb_queue)) ||
(skb = skb_dequeue(&local->skb_queue_unreliable))) {
@@ -274,18 +274,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
skb->pkt_type = 0;
ieee80211_tx_status(local_to_hw(local), skb);
break;
- case IEEE80211_DELBA_MSG:
- ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
- ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra,
- ra_tid->tid);
- dev_kfree_skb(skb);
- break;
- case IEEE80211_ADDBA_MSG:
- ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
- ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra,
- ra_tid->tid);
- dev_kfree_skb(skb);
- break ;
default:
WARN(1, "mac80211: Packet is of unknown type %d\n",
skb->pkt_type);
@@ -330,23 +318,6 @@ static void ieee80211_recalc_smps_work(struct work_struct *work)
}
#ifdef CONFIG_INET
-int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata)
-{
- struct in_device *idev;
- int ret = 0;
-
- BUG_ON(!sdata);
- ASSERT_RTNL();
-
- idev = sdata->dev->ip_ptr;
- if (!idev)
- return 0;
-
- ret = drv_configure_arp_filter(sdata->local, &sdata->vif,
- idev->ifa_list);
- return ret;
-}
-
static int ieee80211_ifa_changed(struct notifier_block *nb,
unsigned long data, void *arg)
{
@@ -356,8 +327,11 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
ifa_notifier);
struct net_device *ndev = ifa->ifa_dev->dev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct in_device *idev;
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_bss_conf *bss_conf;
struct ieee80211_if_managed *ifmgd;
+ int c = 0;
if (!netif_running(ndev))
return NOTIFY_DONE;
@@ -369,17 +343,44 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
if (wdev->wiphy != local->hw.wiphy)
return NOTIFY_DONE;
- /* We are concerned about IP addresses only when associated */
sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
+ bss_conf = &sdata->vif.bss_conf;
/* ARP filtering is only supported in managed mode */
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return NOTIFY_DONE;
+ idev = sdata->dev->ip_ptr;
+ if (!idev)
+ return NOTIFY_DONE;
+
ifmgd = &sdata->u.mgd;
mutex_lock(&ifmgd->mtx);
- if (ifmgd->associated)
- ieee80211_set_arp_filter(sdata);
+
+ /* Copy the addresses to the bss_conf list */
+ ifa = idev->ifa_list;
+ while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) {
+ bss_conf->arp_addr_list[c] = ifa->ifa_address;
+ ifa = ifa->ifa_next;
+ c++;
+ }
+
+ /* If not all addresses fit the list, disable filtering */
+ if (ifa) {
+ sdata->arp_filter_state = false;
+ c = 0;
+ } else {
+ sdata->arp_filter_state = true;
+ }
+ bss_conf->arp_addr_cnt = c;
+
+ /* Configure driver only if associated */
+ if (ifmgd->associated) {
+ bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+ ieee80211_bss_info_change_notify(sdata,
+ BSS_CHANGED_ARP_FILTER);
+ }
+
mutex_unlock(&ifmgd->mtx);
return NOTIFY_DONE;
@@ -476,8 +477,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
sta_info_init(local);
- for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+ for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
skb_queue_head_init(&local->pending[i]);
+ atomic_set(&local->agg_queue_stop[i], 0);
+ }
tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
(unsigned long)local);
@@ -488,8 +491,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
- spin_lock_init(&local->ampdu_lock);
-
return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -629,7 +630,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.conf.listen_interval = local->hw.max_listen_interval;
- local->hw.conf.dynamic_ps_forced_timeout = -1;
+ local->dynamic_ps_forced_timeout = -1;
result = sta_info_start(local);
if (result < 0)
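[editor's note] The ifa notifier above copies the interface's IPv4 addresses into bss_conf and signals BSS_CHANGED_ARP_FILTER; a hedged sketch of the driver side of that notification, assuming the standard bss_info_changed callback signature (the body is a placeholder, not a real driver):

static void example_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *info,
				     u32 changed)
{
	if (!(changed & BSS_CHANGED_ARP_FILTER))
		return;

	if (info->arp_filter_enabled && info->arp_addr_cnt) {
		/* program info->arp_addr_list[0..arp_addr_cnt-1]
		 * into the device's ARP offload/filter registers */
	} else {
		/* too many addresses or filtering disabled:
		 * turn the hardware ARP filter off */
	}
}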
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index bde8103..c8a4f19 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -54,7 +54,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
return;
}
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
/**
@@ -345,7 +345,7 @@ static void ieee80211_mesh_path_timer(unsigned long data)
return;
}
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
static void ieee80211_mesh_path_root_timer(unsigned long data)
@@ -362,7 +362,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
return;
}
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
@@ -484,9 +484,6 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- /* might restart the timer but that doesn't matter */
- cancel_work_sync(&ifmsh->work);
-
/* use atomic bitops in case both timers fire at the same time */
if (del_timer_sync(&ifmsh->housekeeping_timer))
@@ -518,7 +515,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
ieee80211_mesh_root_setup(ifmsh);
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED |
@@ -536,16 +533,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
* whether the interface is running, which, at this point,
* it no longer is.
*/
- cancel_work_sync(&sdata->u.mesh.work);
-
- /*
- * When we get here, the interface is marked down.
- * Call synchronize_rcu() to wait for the RX path
- * should it be using the interface and enqueuing
- * frames at this very time on another CPU.
- */
- rcu_barrier(); /* Wait for RX path and call_rcu()'s */
- skb_queue_purge(&sdata->u.mesh.skb_queue);
+ cancel_work_sync(&sdata->work);
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@ -608,8 +596,8 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
}
}
-static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status;
struct ieee80211_if_mesh *ifmsh;
@@ -632,26 +620,11 @@ static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
break;
}
-
- kfree_skb(skb);
}
-static void ieee80211_mesh_work(struct work_struct *work)
+void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
- struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct sk_buff *skb;
-
- if (!ieee80211_sdata_running(sdata))
- return;
-
- if (local->scanning)
- return;
-
- while ((skb = skb_dequeue(&ifmsh->skb_queue)))
- ieee80211_mesh_rx_queued_mgmt(sdata, skb);
if (ifmsh->preq_queue_len &&
time_after(jiffies,
@@ -678,7 +651,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
if (ieee80211_vif_is_mesh(&sdata->vif))
- ieee80211_queue_work(&local->hw, &sdata->u.mesh.work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
rcu_read_unlock();
}
@@ -686,11 +659,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
setup_timer(&ifmsh->housekeeping_timer,
ieee80211_mesh_housekeeping_timer,
(unsigned long) sdata);
- skb_queue_head_init(&sdata->u.mesh.skb_queue);
ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
@@ -731,29 +702,3 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
INIT_LIST_HEAD(&ifmsh->preq_queue.list);
spin_lock_init(&ifmsh->mesh_preq_queue_lock);
}
-
-ieee80211_rx_result
-ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_mgmt *mgmt;
- u16 fc;
-
- if (skb->len < 24)
- return RX_DROP_MONITOR;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = le16_to_cpu(mgmt->frame_control);
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_ACTION:
- case IEEE80211_STYPE_PROBE_RESP:
- case IEEE80211_STYPE_BEACON:
- skb_queue_tail(&ifmsh->skb_queue, skb);
- ieee80211_queue_work(&local->hw, &ifmsh->work);
- return RX_QUEUED;
- }
-
- return RX_CONTINUE;
-}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index c88087f..ebd3f1d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -237,8 +237,6 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
struct sta_info *stainfo, struct sk_buff *skb);
void ieee80211s_stop(void);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
-ieee80211_rx_result
-ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 0705018..829e08a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -805,14 +805,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
spin_unlock(&ifmsh->mesh_preq_queue_lock);
if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
- ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
else if (time_before(jiffies, ifmsh->last_preq)) {
/* avoid long wait if did not send preqs for a long time
* and jiffies wrapped around
*/
ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
- ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
} else
mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
min_preq_int_jiff(sdata));
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 181ffd6..349e466 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -315,7 +315,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
read_unlock(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
return 0;
@@ -425,7 +425,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
read_unlock(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
return 0;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 3cd5f7b..ea13a80 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -65,7 +65,6 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
{
atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
mesh_accept_plinks_update(sdata);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
}
static inline
@@ -73,7 +72,6 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
{
atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
mesh_accept_plinks_update(sdata);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
}
/**
@@ -115,7 +113,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
}
/**
- * mesh_plink_deactivate - deactivate mesh peer link
+ * __mesh_plink_deactivate - deactivate mesh peer link
*
* @sta: mesh peer link to deactivate
*
@@ -123,18 +121,23 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
*
* Locking: the caller must hold sta->lock
*/
-static void __mesh_plink_deactivate(struct sta_info *sta)
+static bool __mesh_plink_deactivate(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ bool deactivated = false;
- if (sta->plink_state == PLINK_ESTAB)
+ if (sta->plink_state == PLINK_ESTAB) {
mesh_plink_dec_estab_count(sdata);
+ deactivated = true;
+ }
sta->plink_state = PLINK_BLOCKED;
mesh_path_flush_by_nexthop(sta);
+
+ return deactivated;
}
/**
- * __mesh_plink_deactivate - deactivate mesh peer link
+ * mesh_plink_deactivate - deactivate mesh peer link
*
* @sta: mesh peer link to deactivate
*
@@ -142,9 +145,15 @@ static void __mesh_plink_deactivate(struct sta_info *sta)
*/
void mesh_plink_deactivate(struct sta_info *sta)
{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ bool deactivated;
+
spin_lock_bh(&sta->lock);
- __mesh_plink_deactivate(sta);
+ deactivated = __mesh_plink_deactivate(sta);
spin_unlock_bh(&sta->lock);
+
+ if (deactivated)
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
}
static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -381,10 +390,16 @@ int mesh_plink_open(struct sta_info *sta)
void mesh_plink_block(struct sta_info *sta)
{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ bool deactivated;
+
spin_lock_bh(&sta->lock);
- __mesh_plink_deactivate(sta);
+ deactivated = __mesh_plink_deactivate(sta);
sta->plink_state = PLINK_BLOCKED;
spin_unlock_bh(&sta->lock);
+
+ if (deactivated)
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
}
@@ -397,6 +412,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
enum plink_event event;
enum plink_frame_type ftype;
size_t baselen;
+ bool deactivated;
u8 ie_len;
u8 *baseaddr;
__le16 plid, llid, reason;
@@ -651,8 +667,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
case CNF_ACPT:
del_timer(&sta->plink_timer);
sta->plink_state = PLINK_ESTAB;
- mesh_plink_inc_estab_count(sdata);
spin_unlock_bh(&sta->lock);
+ mesh_plink_inc_estab_count(sdata);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
break;
@@ -684,8 +701,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
case OPN_ACPT:
del_timer(&sta->plink_timer);
sta->plink_state = PLINK_ESTAB;
- mesh_plink_inc_estab_count(sdata);
spin_unlock_bh(&sta->lock);
+ mesh_plink_inc_estab_count(sdata);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
@@ -702,11 +720,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
case CLS_ACPT:
reason = cpu_to_le16(MESH_CLOSE_RCVD);
sta->reason = reason;
- __mesh_plink_deactivate(sta);
+ deactivated = __mesh_plink_deactivate(sta);
sta->plink_state = PLINK_HOLDING;
llid = sta->llid;
mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
spin_unlock_bh(&sta->lock);
+ if (deactivated)
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
plid, reason);
break;
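Annotation: every plink hunk above follows one shape: __mesh_plink_deactivate() now reports whether an established link actually went down, and the beacon-change notification is issued only after sta->lock has been released. A minimal sketch of that shape, using invented names (struct peer, peer_notify_beacon_change) rather than the mac80211 ones:

struct peer {
        spinlock_t lock;
        bool established;
};

/* caller must hold p->lock */
static bool peer_deactivate_locked(struct peer *p)
{
        bool was_established = p->established;

        p->established = false;
        return was_established;
}

void peer_deactivate(struct peer *p)
{
        bool notify;

        spin_lock_bh(&p->lock);
        notify = peer_deactivate_locked(p);
        spin_unlock_bh(&p->lock);

        /* hypothetical helper standing in for
         * ieee80211_bss_info_change_notify(); the point of the change
         * is that it is never called with the peer spinlock held */
        if (notify)
                peer_notify_beacon_change(p);
}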
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 8fb85c3..d196265 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -478,6 +478,39 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
}
}
+void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_conf *conf = &local->hw.conf;
+
+ WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
+ !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
+ (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
+
+ local->disable_dynamic_ps = false;
+ conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout;
+}
+EXPORT_SYMBOL(ieee80211_enable_dyn_ps);
+
+void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_conf *conf = &local->hw.conf;
+
+ WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
+ !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
+ (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
+
+ local->disable_dynamic_ps = true;
+ conf->dynamic_ps_timeout = 0;
+ del_timer_sync(&local->dynamic_ps_timer);
+ ieee80211_queue_work(&local->hw,
+ &local->dynamic_ps_enable_work);
+}
+EXPORT_SYMBOL(ieee80211_disable_dyn_ps);
+
/* powersave */
static void ieee80211_enable_ps(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
@@ -553,6 +586,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
found->u.mgd.associated->beacon_ies &&
!(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
IEEE80211_STA_CONNECTION_POLL))) {
+ struct ieee80211_conf *conf = &local->hw.conf;
s32 beaconint_us;
if (latency < 0)
@@ -561,25 +595,24 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
beaconint_us = ieee80211_tu_to_usec(
found->vif.bss_conf.beacon_int);
- timeout = local->hw.conf.dynamic_ps_forced_timeout;
+ timeout = local->dynamic_ps_forced_timeout;
if (timeout < 0) {
/*
+ * Go to full PSM if the user configures a very low
+ * latency requirement.
* The 2 second value is there for compatibility until
* the PM_QOS_NETWORK_LATENCY is configured with real
* values.
*/
- if (latency == 2000000000)
- timeout = 100;
- else if (latency <= 50000)
- timeout = 300;
- else if (latency <= 100000)
- timeout = 100;
- else if (latency <= 500000)
- timeout = 50;
- else
+ if (latency > 1900000000 && latency != 2000000000)
timeout = 0;
+ else
+ timeout = 100;
}
- local->hw.conf.dynamic_ps_timeout = timeout;
+ local->dynamic_ps_user_timeout = timeout;
+ if (!local->disable_dynamic_ps)
+ conf->dynamic_ps_timeout =
+ local->dynamic_ps_user_timeout;
if (beaconint_us > latency) {
local->ps_sdata = NULL;
@@ -806,11 +839,12 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_bss *bss = (void *)cbss->priv;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
bss_info_changed |= BSS_CHANGED_ASSOC;
/* set timing information */
- sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
- sdata->vif.bss_conf.timestamp = cbss->tsf;
+ bss_conf->beacon_int = cbss->beacon_interval;
+ bss_conf->timestamp = cbss->tsf;
bss_info_changed |= BSS_CHANGED_BEACON_INT;
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
@@ -835,7 +869,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
ieee80211_led_assoc(local, 1);
- sdata->vif.bss_conf.assoc = 1;
+ bss_conf->assoc = 1;
/*
* For now just always ask the driver to update the basic rateset
* when we have associated, we aren't checking whether it actually
@@ -848,9 +882,15 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
/* Tell the driver to monitor connection quality (if supported) */
if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
- sdata->vif.bss_conf.cqm_rssi_thold)
+ bss_conf->cqm_rssi_thold)
bss_info_changed |= BSS_CHANGED_CQM;
+ /* Enable ARP filtering */
+ if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) {
+ bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+ bss_info_changed |= BSS_CHANGED_ARP_FILTER;
+ }
+
ieee80211_bss_info_change_notify(sdata, bss_info_changed);
mutex_lock(&local->iflist_mtx);
@@ -898,13 +938,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
netif_tx_stop_all_queues(sdata->dev);
netif_carrier_off(sdata->dev);
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, bssid);
if (sta) {
- set_sta_flags(sta, WLAN_STA_DISASSOC);
+ set_sta_flags(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
changed |= ieee80211_reset_erp_info(sdata);
@@ -932,6 +972,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_config(local, config_changed);
+ /* Disable ARP filtering */
+ if (sdata->vif.bss_conf.arp_filter_enabled) {
+ sdata->vif.bss_conf.arp_filter_enabled = false;
+ changed |= BSS_CHANGED_ARP_FILTER;
+ }
+
/* The BSSID (not really interesting) and HT changed */
changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
@@ -1633,35 +1679,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_bss_info_change_notify(sdata, changed);
}
-ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_mgmt *mgmt;
- u16 fc;
-
- if (skb->len < 24)
- return RX_DROP_MONITOR;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = le16_to_cpu(mgmt->frame_control);
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PROBE_RESP:
- case IEEE80211_STYPE_BEACON:
- case IEEE80211_STYPE_DEAUTH:
- case IEEE80211_STYPE_DISASSOC:
- case IEEE80211_STYPE_ACTION:
- skb_queue_tail(&sdata->u.mgd.skb_queue, skb);
- ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
- return RX_QUEUED;
- }
-
- return RX_DROP_MONITOR;
-}
-
-static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_rx_status *rx_status;
@@ -1693,44 +1712,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
break;
case IEEE80211_STYPE_ACTION:
switch (mgmt->u.action.category) {
- case WLAN_CATEGORY_BACK: {
- struct ieee80211_local *local = sdata->local;
- int len = skb->len;
- struct sta_info *sta;
-
- rcu_read_lock();
- sta = sta_info_get(sdata, mgmt->sa);
- if (!sta) {
- rcu_read_unlock();
- break;
- }
-
- local_bh_disable();
-
- switch (mgmt->u.action.u.addba_req.action_code) {
- case WLAN_ACTION_ADDBA_REQ:
- if (len < (IEEE80211_MIN_ACTION_SIZE +
- sizeof(mgmt->u.action.u.addba_req)))
- break;
- ieee80211_process_addba_request(local, sta, mgmt, len);
- break;
- case WLAN_ACTION_ADDBA_RESP:
- if (len < (IEEE80211_MIN_ACTION_SIZE +
- sizeof(mgmt->u.action.u.addba_resp)))
- break;
- ieee80211_process_addba_resp(local, sta, mgmt, len);
- break;
- case WLAN_ACTION_DELBA:
- if (len < (IEEE80211_MIN_ACTION_SIZE +
- sizeof(mgmt->u.action.u.delba)))
- break;
- ieee80211_process_delba(sdata, sta, mgmt, len);
- break;
- }
- local_bh_enable();
- rcu_read_unlock();
- break;
- }
case WLAN_CATEGORY_SPECTRUM_MGMT:
ieee80211_sta_process_chanswitch(sdata,
&mgmt->u.action.u.chan_switch.sw_elem,
@@ -1754,7 +1735,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
default:
WARN(1, "unexpected: %d", rma);
}
- goto out;
+ return;
}
mutex_unlock(&ifmgd->mtx);
@@ -1799,8 +1780,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
}
- out:
- kfree_skb(skb);
}
static void ieee80211_sta_timer(unsigned long data)
@@ -1815,39 +1794,13 @@ static void ieee80211_sta_timer(unsigned long data)
return;
}
- ieee80211_queue_work(&local->hw, &ifmgd->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
-static void ieee80211_sta_work(struct work_struct *work)
+void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data, u.mgd.work);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd;
- struct sk_buff *skb;
-
- if (!ieee80211_sdata_running(sdata))
- return;
-
- if (local->scanning)
- return;
-
- if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
- return;
-
- /*
- * ieee80211_queue_work() should have picked up most cases,
- * here we'll pick the rest.
- */
- if (WARN(local->suspended, "STA MLME work scheduled while "
- "going to suspend\n"))
- return;
-
- ifmgd = &sdata->u.mgd;
-
- /* first process frames to avoid timing out while a frame is pending */
- while ((skb = skb_dequeue(&ifmgd->skb_queue)))
- ieee80211_sta_rx_queued_mgmt(sdata, skb);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
/* then process the rest of the work */
mutex_lock(&ifmgd->mtx);
@@ -1942,8 +1895,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
ieee80211_queue_work(&sdata->local->hw,
&sdata->u.mgd.monitor_work);
/* and do all the other regular work too */
- ieee80211_queue_work(&sdata->local->hw,
- &sdata->u.mgd.work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
}
@@ -1958,7 +1910,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
* time -- the code here is properly synchronised.
*/
- cancel_work_sync(&ifmgd->work);
cancel_work_sync(&ifmgd->beacon_connection_loss_work);
if (del_timer_sync(&ifmgd->timer))
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1990,7 +1941,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_managed *ifmgd;
ifmgd = &sdata->u.mgd;
- INIT_WORK(&ifmgd->work, ieee80211_sta_work);
INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
INIT_WORK(&ifmgd->beacon_connection_loss_work,
@@ -2003,7 +1953,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
(unsigned long) sdata);
setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
(unsigned long) sdata);
- skb_queue_head_init(&ifmgd->skb_queue);
ifmgd->flags = 0;
@@ -2152,18 +2101,9 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
cfg80211_send_assoc_timeout(wk->sdata->dev,
wk->filter_ta);
return WORK_DONE_DESTROY;
- } else {
- mutex_unlock(&wk->sdata->u.mgd.mtx);
-#ifdef CONFIG_INET
- /*
- * configure ARP filter IP addresses to the driver,
- * intentionally outside the mgd mutex.
- */
- rtnl_lock();
- ieee80211_set_arp_filter(wk->sdata);
- rtnl_unlock();
-#endif
}
+
+ mutex_unlock(&wk->sdata->u.mgd.mtx);
}
cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
@@ -2292,14 +2232,16 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_work *wk;
- const u8 *bssid = req->bss->bssid;
+ u8 bssid[ETH_ALEN];
+ bool assoc_bss = false;
mutex_lock(&ifmgd->mtx);
+ memcpy(bssid, req->bss->bssid, ETH_ALEN);
if (ifmgd->associated == req->bss) {
- bssid = req->bss->bssid;
- ieee80211_set_disassoc(sdata, true);
+ ieee80211_set_disassoc(sdata, false);
mutex_unlock(&ifmgd->mtx);
+ assoc_bss = true;
} else {
bool not_auth_yet = false;
@@ -2345,6 +2287,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
req->reason_code, cookie,
!req->local_state_change);
+ if (assoc_bss)
+ sta_info_destroy_addr(sdata, bssid);
ieee80211_recalc_idle(sdata->local);
@@ -2389,44 +2333,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
return 0;
}
-int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- bool channel_type_valid,
- const u8 *buf, size_t len, u64 *cookie)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct sk_buff *skb;
-
- /* Check that we are on the requested channel for transmission */
- if (chan != local->tmp_channel &&
- chan != local->oper_channel)
- return -EBUSY;
- if (channel_type_valid &&
- (channel_type != local->tmp_channel_type &&
- channel_type != local->_oper_channel_type))
- return -EBUSY;
-
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
- if (!skb)
- return -ENOMEM;
- skb_reserve(skb, local->hw.extra_tx_headroom);
-
- memcpy(skb_put(skb, len), buf, len);
-
- if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
- IEEE80211_SKB_CB(skb)->flags |=
- IEEE80211_TX_INTFL_DONT_ENCRYPT;
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
- IEEE80211_TX_CTL_REQ_TX_STATUS;
- skb->dev = sdata->dev;
- ieee80211_tx_skb(sdata, skb);
-
- *cookie = (unsigned long) skb;
- return 0;
-}
-
void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
enum nl80211_cqm_rssi_threshold_event rssi_event,
gfp_t gfp)
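Annotation: ieee80211_enable_dyn_ps() and ieee80211_disable_dyn_ps() above are new exports. Disabling forces power save fully on (dynamic timeout set to 0, the timer cancelled, the enable work queued); enabling hands the user-configured dynamic timeout back to mac80211. A hedged usage sketch from a driver's point of view follows; the callback names are invented, and per the WARN_ON checks above the calls are only valid on a station vif with IEEE80211_HW_SUPPORTS_PS and without IEEE80211_HW_SUPPORTS_DYNAMIC_PS.

/* Sketch only: a driver whose firmware temporarily takes over dynamic
 * power save might bracket that mode with the two new exports.  The
 * function names are illustrative, not a real driver API. */
static void example_fw_ps_takeover(struct ieee80211_vif *vif)
{
        /* mac80211 zeroes dynamic_ps_timeout and enables PS directly */
        ieee80211_disable_dyn_ps(vif);
}

static void example_fw_ps_release(struct ieee80211_vif *vif)
{
        /* restore the user-configured dynamic PS timeout */
        ieee80211_enable_dyn_ps(vif);
}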
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 75202b2..d287fde 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -40,22 +40,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
list_for_each_entry(sdata, &local->interfaces, list)
ieee80211_disable_keys(sdata);
- /* Tear down aggregation sessions */
-
- rcu_read_lock();
-
- if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ /* tear down aggregation sessions and remove STAs */
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta, &local->sta_list, list) {
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
set_sta_flags(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta);
}
- }
- rcu_read_unlock();
-
- /* remove STAs */
- mutex_lock(&local->sta_mtx);
- list_for_each_entry(sta, &local->sta_list, list) {
if (sta->uploaded) {
sdata = sta->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -72,6 +64,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
/* remove all interfaces */
list_for_each_entry(sdata, &local->interfaces, list) {
+ cancel_work_sync(&sdata->work);
+
switch(sdata->vif.type) {
case NL80211_IFTYPE_STATION:
ieee80211_sta_quiesce(sdata);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index c23f082..52c8503 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -328,7 +328,8 @@ minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
}
static void
-minstrel_downgrade_rate(struct minstrel_ht_sta *mi, int *idx, bool primary)
+minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
+ bool primary)
{
int group, orig_group;
@@ -365,7 +366,7 @@ minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, stru
return;
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- if (likely(sta->ampdu_mlme.tid_state_tx[tid] != HT_AGG_STATE_IDLE))
+ if (likely(sta->ampdu_mlme.tid_tx[tid]))
return;
ieee80211_start_tx_ba_session(pubsta, tid);
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 696c0fc..462d2b2 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -29,6 +29,8 @@ struct mcs_group {
unsigned int duration[MCS_GROUP_RATES];
};
+extern const struct mcs_group minstrel_mcs_groups[];
+
struct minstrel_rate_stats {
/* current / last sampling period attempts/success counters */
unsigned int attempts, last_attempts;
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 4fb3ccb..4a5a4b3 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -14,8 +14,6 @@
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"
-extern const struct mcs_group minstrel_mcs_groups[];
-
static int
minstrel_ht_stats_open(struct inode *inode, struct file *file)
{
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6a15632..fa0f37e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -293,7 +293,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
skb2->dev = prev_dev;
- netif_rx(skb2);
+ netif_receive_skb(skb2);
}
}
@@ -304,7 +304,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (prev_dev) {
skb->dev = prev_dev;
- netif_rx(skb);
+ netif_receive_skb(skb);
} else
dev_kfree_skb(skb);
@@ -719,16 +719,13 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- spin_lock(&sta->lock);
-
- if (!sta->ampdu_mlme.tid_active_rx[tid])
- goto dont_reorder_unlock;
-
- tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+ if (!tid_agg_rx)
+ goto dont_reorder;
/* qos null data frames are excluded */
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
- goto dont_reorder_unlock;
+ goto dont_reorder;
/* new, potentially un-ordered, ampdu frame - process it */
@@ -740,20 +737,22 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
/* if this mpdu is fragmented - terminate rx aggregation session */
sc = le16_to_cpu(hdr->seq_ctrl);
if (sc & IEEE80211_SCTL_FRAG) {
- spin_unlock(&sta->lock);
- __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
- WLAN_REASON_QSTA_REQUIRE_SETUP);
- dev_kfree_skb(skb);
+ skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&rx->sdata->skb_queue, skb);
+ ieee80211_queue_work(&local->hw, &rx->sdata->work);
return;
}
- if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
- spin_unlock(&sta->lock);
+ /*
+ * No locking needed -- we will only ever process one
+ * RX packet at a time, and thus own tid_agg_rx. All
+ * other code manipulating it needs to (and does) make
+ * sure that we cannot get to it any more before doing
+ * anything with it.
+ */
+ if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
return;
- }
- dont_reorder_unlock:
- spin_unlock(&sta->lock);
dont_reorder:
__skb_queue_tail(frames, skb);
}
@@ -1268,11 +1267,13 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
rx->queue, &(rx->skb));
if (rx->key && rx->key->conf.alg == ALG_CCMP &&
ieee80211_has_protected(fc)) {
+ int queue = ieee80211_is_mgmt(fc) ?
+ NUM_RX_DATA_QUEUES : rx->queue;
/* Store CCMP PN so that we can verify that the next
* fragment has a sequential PN value. */
entry->ccmp = 1;
memcpy(entry->last_pn,
- rx->key->u.ccmp.rx_pn[rx->queue],
+ rx->key->u.ccmp.rx_pn[queue],
CCMP_PN_LEN);
}
return RX_QUEUED;
@@ -1292,6 +1293,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (entry->ccmp) {
int i;
u8 pn[CCMP_PN_LEN], *rpn;
+ int queue;
if (!rx->key || rx->key->conf.alg != ALG_CCMP)
return RX_DROP_UNUSABLE;
memcpy(pn, entry->last_pn, CCMP_PN_LEN);
@@ -1300,7 +1302,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (pn[i])
break;
}
- rpn = rx->key->u.ccmp.rx_pn[rx->queue];
+ queue = ieee80211_is_mgmt(fc) ?
+ NUM_RX_DATA_QUEUES : rx->queue;
+ rpn = rx->key->u.ccmp.rx_pn[queue];
if (memcmp(pn, rpn, CCMP_PN_LEN))
return RX_DROP_UNUSABLE;
memcpy(entry->last_pn, pn, CCMP_PN_LEN);
@@ -1574,7 +1578,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
/* deliver to local stack */
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
+ netif_receive_skb(skb);
}
}
@@ -1830,13 +1834,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
&bar_data, sizeof(bar_data)))
return RX_DROP_MONITOR;
- spin_lock(&rx->sta->lock);
tid = le16_to_cpu(bar_data.control) >> 12;
- if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
- spin_unlock(&rx->sta->lock);
+
+ tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
+ if (!tid_agg_rx)
return RX_DROP_MONITOR;
- }
- tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
@@ -1849,7 +1851,6 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
frames);
kfree_skb(skb);
- spin_unlock(&rx->sta->lock);
return RX_QUEUED;
}
@@ -1950,30 +1951,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
break;
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- return ieee80211_sta_rx_mgmt(sdata, rx->skb);
-
switch (mgmt->u.action.u.addba_req.action_code) {
case WLAN_ACTION_ADDBA_REQ:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.addba_req)))
- return RX_DROP_MONITOR;
- ieee80211_process_addba_request(local, rx->sta, mgmt, len);
- goto handled;
+ goto invalid;
+ break;
case WLAN_ACTION_ADDBA_RESP:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.addba_resp)))
- break;
- ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
- goto handled;
+ goto invalid;
+ break;
case WLAN_ACTION_DELBA:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.delba)))
- break;
- ieee80211_process_delba(sdata, rx->sta, mgmt, len);
- goto handled;
+ goto invalid;
+ break;
+ default:
+ goto invalid;
}
- break;
+
+ goto queue;
case WLAN_CATEGORY_SPECTRUM_MGMT:
if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
break;
@@ -2003,7 +2001,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
break;
- return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+ goto queue;
}
break;
case WLAN_CATEGORY_SA_QUERY:
@@ -2021,11 +2019,12 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
case WLAN_CATEGORY_MESH_PLINK:
case WLAN_CATEGORY_MESH_PATH_SEL:
- if (ieee80211_vif_is_mesh(&sdata->vif))
- return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
- break;
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ goto queue;
}
+ invalid:
/*
* For AP mode, hostapd is responsible for handling any action
* frames that we didn't handle, including returning unknown
@@ -2045,8 +2044,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
*/
status = IEEE80211_SKB_RXCB(rx->skb);
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- cfg80211_rx_action(rx->sdata->dev, status->freq,
+ if (cfg80211_rx_action(rx->sdata->dev, status->freq,
rx->skb->data, rx->skb->len,
GFP_ATOMIC))
goto handled;
@@ -2058,11 +2056,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
GFP_ATOMIC);
if (nskb) {
- struct ieee80211_mgmt *mgmt = (void *)nskb->data;
+ struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
- mgmt->u.action.category |= 0x80;
- memcpy(mgmt->da, mgmt->sa, ETH_ALEN);
- memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
+ nmgmt->u.action.category |= 0x80;
+ memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
+ memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
memset(nskb->cb, 0, sizeof(nskb->cb));
@@ -2074,6 +2072,14 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
rx->sta->rx_packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
+
+ queue:
+ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&sdata->skb_queue, rx->skb);
+ ieee80211_queue_work(&local->hw, &sdata->work);
+ if (rx->sta)
+ rx->sta->rx_packets++;
+ return RX_QUEUED;
}
static ieee80211_rx_result debug_noinline
@@ -2081,10 +2087,15 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
ieee80211_rx_result rxs;
+ struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+ __le16 stype;
if (!(rx->flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_MONITOR;
+ if (rx->skb->len < 24)
+ return RX_DROP_MONITOR;
+
if (ieee80211_drop_unencrypted_mgmt(rx))
return RX_DROP_UNUSABLE;
@@ -2092,16 +2103,42 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
if (rxs != RX_CONTINUE)
return rxs;
- if (ieee80211_vif_is_mesh(&sdata->vif))
- return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+ stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
+ if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+ sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_DROP_MONITOR;
+
+ switch (stype) {
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ /* process for all: mesh, mlme, ibss */
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ /* process only for station */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_DROP_MONITOR;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ /* process only for ibss */
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ return RX_DROP_MONITOR;
+ break;
+ default:
+ return RX_DROP_MONITOR;
+ }
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+ /* queue up frame and kick off work to process it */
+ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&sdata->skb_queue, rx->skb);
+ ieee80211_queue_work(&rx->local->hw, &sdata->work);
+ if (rx->sta)
+ rx->sta->rx_packets++;
- return RX_DROP_MONITOR;
+ return RX_QUEUED;
}
static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
@@ -2207,7 +2244,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
skb2->dev = prev_dev;
- netif_rx(skb2);
+ netif_receive_skb(skb2);
}
}
@@ -2218,7 +2255,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
if (prev_dev) {
skb->dev = prev_dev;
- netif_rx(skb);
+ netif_receive_skb(skb);
skb = NULL;
} else
goto out_free_skb;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index e1b0be7..439c98d 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -286,6 +286,8 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
local->scanning = 0;
local->scan_channel = NULL;
+ drv_sw_scan_complete(local);
+
/* we only have to protect scan_req and hw/sw scan */
mutex_unlock(&local->scan_mtx);
@@ -295,8 +297,6 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
ieee80211_configure_filter(local);
- drv_sw_scan_complete(local);
-
ieee80211_offchannel_return(local, true);
done:
@@ -734,7 +734,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
int ret = -EBUSY;
- enum nl80211_band band;
+ enum ieee80211_band band;
mutex_lock(&local->scan_mtx);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4607da9..67656cb 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -235,6 +235,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
spin_lock_init(&sta->lock);
spin_lock_init(&sta->flaglock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+ INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
+ mutex_init(&sta->ampdu_mlme.mtx);
memcpy(sta->sta.addr, addr, ETH_ALEN);
sta->local = local;
@@ -246,14 +248,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
}
for (i = 0; i < STA_TID_NUM; i++) {
- /* timer_to_tid must be initialized with identity mapping to
- * enable session_timer's data differentiation. refer to
- * sta_rx_agg_session_timer_expired for useage */
+ /*
+ * timer_to_tid must be initialized with identity mapping
+ * to enable session_timer's data differentiation. See
+ * sta_rx_agg_session_timer_expired for usage.
+ */
sta->timer_to_tid[i] = i;
- /* tx */
- sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
- sta->ampdu_mlme.tid_tx[i] = NULL;
- sta->ampdu_mlme.addba_req_num[i] = 0;
}
skb_queue_head_init(&sta->ps_tx_buf);
skb_queue_head_init(&sta->tx_filtered);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 813da34..54262e7 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,9 +42,6 @@
* be in the queues
* @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
* station in power-save mode, reply when the driver unblocks.
- * @WLAN_STA_DISASSOC: Disassociation in progress.
- * This is used to reject TX BA session requests when disassociation
- * is in progress.
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH = 1<<0,
@@ -60,38 +57,44 @@ enum ieee80211_sta_info_flags {
WLAN_STA_BLOCK_BA = 1<<11,
WLAN_STA_PS_DRIVER = 1<<12,
WLAN_STA_PSPOLL = 1<<13,
- WLAN_STA_DISASSOC = 1<<14,
};
#define STA_TID_NUM 16
#define ADDBA_RESP_INTERVAL HZ
-#define HT_AGG_MAX_RETRIES (0x3)
+#define HT_AGG_MAX_RETRIES 0x3
-#define HT_AGG_STATE_INITIATOR_SHIFT (4)
-
-#define HT_ADDBA_REQUESTED_MSK BIT(0)
-#define HT_ADDBA_DRV_READY_MSK BIT(1)
-#define HT_ADDBA_RECEIVED_MSK BIT(2)
-#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3)
-#define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT)
-#define HT_AGG_STATE_IDLE (0x0)
-#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
- HT_ADDBA_DRV_READY_MSK | \
- HT_ADDBA_RECEIVED_MSK)
+#define HT_AGG_STATE_DRV_READY 0
+#define HT_AGG_STATE_RESPONSE_RECEIVED 1
+#define HT_AGG_STATE_OPERATIONAL 2
+#define HT_AGG_STATE_STOPPING 3
+#define HT_AGG_STATE_WANT_START 4
+#define HT_AGG_STATE_WANT_STOP 5
/**
* struct tid_ampdu_tx - TID aggregation information (Tx).
*
+ * @rcu_head: rcu head for freeing structure
* @addba_resp_timer: timer for peer's response to addba request
* @pending: pending frames queue -- use sta's spinlock to protect
- * @ssn: Starting Sequence Number expected to be aggregated.
* @dialog_token: dialog token for aggregation session
+ * @state: session state (see above)
+ * @stop_initiator: initiator of a session stop
+ *
+ * This structure is protected by RCU and the per-station
+ * spinlock. Assignments to the array holding it must hold
+ * the spinlock, only the TX path can access it under RCU
+ * lock-free if, and only if, the state has the flag
+ * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path
+ * must also acquire the spinlock and re-check the state,
+ * see comments in the tx code touching it.
*/
struct tid_ampdu_tx {
+ struct rcu_head rcu_head;
struct timer_list addba_resp_timer;
struct sk_buff_head pending;
- u16 ssn;
+ unsigned long state;
u8 dialog_token;
+ u8 stop_initiator;
};
/**
@@ -106,8 +109,18 @@ struct tid_ampdu_tx {
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
* @dialog_token: dialog token for aggregation session
+ * @rcu_head: RCU head used for freeing this struct
+ *
+ * This structure is protected by RCU and the per-station
+ * spinlock. Assignments to the array holding it must hold
+ * the spinlock, only the RX path can access it under RCU
+ * lock-free. The RX path, since it is single-threaded,
+ * can even modify the structure without locking since the
+ * only other modifications to it are done when the struct
+ * can not yet or no longer be found by the RX path.
*/
struct tid_ampdu_rx {
+ struct rcu_head rcu_head;
struct sk_buff **reorder_buf;
unsigned long *reorder_time;
struct timer_list session_timer;
@@ -122,19 +135,23 @@ struct tid_ampdu_rx {
/**
* struct sta_ampdu_mlme - STA aggregation information.
*
- * @tid_active_rx: TID's state in Rx session state machine.
- * @tid_rx: aggregation info for Rx per TID
- * @tid_state_tx: TID's state in Tx session state machine.
+ * @tid_rx: aggregation info for Rx per TID -- RCU protected
* @tid_tx: aggregation info for Tx per TID
* @addba_req_num: number of times addBA request has been sent.
* @dialog_token_allocator: dialog token enumerator for each new session;
+ * @work: work struct for starting/stopping aggregation
+ * @tid_rx_timer_expired: bitmap indicating on which TIDs the
+ * RX timer expired until the work for it runs
+ * @mtx: mutex to protect all TX data (except non-NULL assignments
+ * to tid_tx[idx], which are protected by the sta spinlock)
*/
struct sta_ampdu_mlme {
+ struct mutex mtx;
/* rx */
- bool tid_active_rx[STA_TID_NUM];
struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
+ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
/* tx */
- u8 tid_state_tx[STA_TID_NUM];
+ struct work_struct work;
struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
u8 addba_req_num[STA_TID_NUM];
u8 dialog_token_allocator;
@@ -410,20 +427,20 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
{
}
-#define for_each_sta_info(local, _addr, sta, nxt) \
+#define for_each_sta_info(local, _addr, _sta, nxt) \
for ( /* initialise loop */ \
- sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
- nxt = sta ? rcu_dereference(sta->hnext) : NULL; \
+ _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
+ nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
/* typecheck */ \
- for_each_sta_info_type_check(local, (_addr), sta, nxt), \
+ for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
/* continue condition */ \
- sta; \
+ _sta; \
/* advance loop */ \
- sta = nxt, \
- nxt = sta ? rcu_dereference(sta->hnext) : NULL \
+ _sta = nxt, \
+ nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
) \
/* compare address and run code only if it matches */ \
- if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0)
+ if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
/*
* Get STA info by index, BROKEN!
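Annotation: the reworked kernel-doc above is the heart of this series: tid_rx and tid_tx become RCU-managed pointers, the RX side uses them lock-free because it is single-threaded, and the TX side may do so only while HT_AGG_STATE_OPERATIONAL is set. A compressed sketch of the two reader sides it describes, assuming the usual mac80211 types and omitting all real processing:

static void rx_reader_sketch(struct sta_info *sta, int tid)
{
        struct tid_ampdu_rx *agg_rx;

        /* RX is single-threaded: once found via RCU, no further locking */
        agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
        if (!agg_rx)
                return;
        /* ... reorder-buffer handling ... */
}

static bool tx_reader_sketch(struct sta_info *sta, int tid)
{
        struct tid_ampdu_tx *agg_tx;

        agg_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
        /* lock-free use is allowed only in the OPERATIONAL state; any
         * other state means taking sta->lock and re-checking, as the
         * tx.c hunk further down does */
        return agg_tx && test_bit(HT_AGG_STATE_OPERATIONAL, &agg_tx->state);
}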
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 34da679..10caec5 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -377,7 +377,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
skb2->dev = prev_dev;
- netif_rx(skb2);
+ netif_receive_skb(skb2);
}
}
@@ -386,7 +386,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
if (prev_dev) {
skb->dev = prev_dev;
- netif_rx(skb);
+ netif_receive_skb(skb);
skb = NULL;
}
rcu_read_unlock();
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 680bcb7..698d471 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1092,6 +1092,59 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
return true;
}
+static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+ struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct tid_ampdu_tx *tid_tx,
+ int tid)
+{
+ bool queued = false;
+
+ if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+ /*
+ * nothing -- this aggregation session is being started
+ * but that might still fail with the driver
+ */
+ } else {
+ spin_lock(&tx->sta->lock);
+ /*
+ * Need to re-check now, because we may get here
+ *
+ * 1) in the window during which the setup is actually
+ * already done, but not marked yet because not all
+ * packets are spliced over to the driver pending
+ * queue yet -- if this happened we acquire the lock
+ * either before or after the splice happens, but
+ * need to recheck which of these cases happened.
+ *
+ * 2) during session teardown, if the OPERATIONAL bit
+ * was cleared due to the teardown but the pointer
+ * hasn't been assigned NULL yet (or we loaded it
+ * before it was assigned) -- in this case it may
+ * now be NULL which means we should just let the
+ * packet pass through because splicing the frames
+ * back is already done.
+ */
+ tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
+
+ if (!tid_tx) {
+ /* do nothing, let packet pass through */
+ } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ } else {
+ queued = true;
+ info->control.vif = &tx->sdata->vif;
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ __skb_queue_tail(&tid_tx->pending, skb);
+ }
+ spin_unlock(&tx->sta->lock);
+ }
+
+ return queued;
+}
+
/*
* initialises @tx
*/
@@ -1104,8 +1157,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int hdrlen, tid;
- u8 *qc, *state;
- bool queued = false;
+ u8 *qc;
memset(tx, 0, sizeof(*tx));
tx->skb = skb;
@@ -1157,35 +1209,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- spin_lock(&tx->sta->lock);
- /*
- * XXX: This spinlock could be fairly expensive, but see the
- * comment in agg-tx.c:ieee80211_agg_tx_operational().
- * One way to solve this would be to do something RCU-like
- * for managing the tid_tx struct and using atomic bitops
- * for the actual state -- by introducing an actual
- * 'operational' bit that would be possible. It would
- * require changing ieee80211_agg_tx_operational() to
- * set that bit, and changing the way tid_tx is managed
- * everywhere, including races between that bit and
- * tid_tx going away (tid_tx being added can be easily
- * committed to memory before the 'operational' bit).
- */
- tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
- state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
- if (*state == HT_AGG_STATE_OPERATIONAL) {
- info->flags |= IEEE80211_TX_CTL_AMPDU;
- } else if (*state != HT_AGG_STATE_IDLE) {
- /* in progress */
- queued = true;
- info->control.vif = &sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- __skb_queue_tail(&tid_tx->pending, skb);
- }
- spin_unlock(&tx->sta->lock);
+ tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+ if (tid_tx) {
+ bool queued;
- if (unlikely(queued))
- return TX_QUEUED;
+ queued = ieee80211_tx_prep_agg(tx, skb, info,
+ tid_tx, tid);
+
+ if (unlikely(queued))
+ return TX_QUEUED;
+ }
}
if (is_multicast_ether_addr(hdr->addr1)) {
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5b79d55..a54cf14 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1138,18 +1138,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
mutex_unlock(&local->sta_mtx);
- /* Clear Suspend state so that ADDBA requests can be processed */
-
- rcu_read_lock();
-
- if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
- clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
- }
- }
-
- rcu_read_unlock();
-
/* setup RTS threshold */
drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
@@ -1202,13 +1190,26 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
- rcu_read_lock();
+ /*
+ * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
+ * sessions can be established after a resume.
+ *
+ * Also tear down aggregation sessions since reconfiguring
+ * them in a hardware restart scenario is not easily done
+ * right now, and the hardware will have lost information
+ * about the sessions, but we and the AP still think they
+ * are active. This is really a workaround though.
+ */
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ mutex_lock(&local->sta_mtx);
+
+ list_for_each_entry(sta, &local->sta_list, list) {
ieee80211_sta_tear_down_BA_sessions(sta);
+ clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
}
+
+ mutex_unlock(&local->sta_mtx);
}
- rcu_read_unlock();
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 4157717..c22a71c 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -715,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
struct ieee80211_work *wk;
- enum work_action rma;
+ enum work_action rma = WORK_ACT_NONE;
u16 fc;
rx_status = (struct ieee80211_rx_status *) skb->cb;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 0adbcc9..a14e677 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -436,6 +436,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 pn[CCMP_PN_LEN];
int data_len;
+ int queue;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -453,7 +454,10 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
ccmp_hdr2pn(pn, skb->data + hdrlen);
- if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) {
+ queue = ieee80211_is_mgmt(hdr->frame_control) ?
+ NUM_RX_DATA_QUEUES : rx->queue;
+
+ if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) {
key->u.ccmp.replays++;
return RX_DROP_UNUSABLE;
}
@@ -470,7 +474,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
}
- memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN);
+ memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);
/* Remove CCMP header and MIC */
skb_trim(skb, skb->len - CCMP_MIC_LEN);
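Annotation: the wpa.c and rx.c hunks give robust management frames their own CCMP PN replay slot, one past the data queues, instead of sharing rx->queue with data frames. The selection reduces to the small helper below (a sketch; mac80211 keeps this as an inline ternary rather than a separate function):

/* one extra PN slot, index NUM_RX_DATA_QUEUES, reserved for mgmt frames */
static int ccmp_pn_queue_sketch(__le16 frame_control, int rx_queue)
{
        return ieee80211_is_mgmt(frame_control) ? NUM_RX_DATA_QUEUES
                                                : rx_queue;
}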
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 717e623..654544e 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -165,6 +165,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
ct_write_lock(hash);
+ spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
@@ -177,6 +178,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
ret = 0;
}
+ spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
@@ -196,6 +198,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
ct_write_lock(hash);
+ spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
list_del(&cp->c_list);
@@ -205,6 +208,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
} else
ret = 0;
+ spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 9dd8cd4..802dbff 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -736,27 +736,19 @@ static bool tcp_in_window(const struct nf_conn *ct,
return res;
}
-#define TH_FIN 0x01
-#define TH_SYN 0x02
-#define TH_RST 0x04
-#define TH_PUSH 0x08
-#define TH_ACK 0x10
-#define TH_URG 0x20
-#define TH_ECE 0x40
-#define TH_CWR 0x80
-
/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
-static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
+static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
+ TCPHDR_URG) + 1] =
{
- [TH_SYN] = 1,
- [TH_SYN|TH_URG] = 1,
- [TH_SYN|TH_ACK] = 1,
- [TH_RST] = 1,
- [TH_RST|TH_ACK] = 1,
- [TH_FIN|TH_ACK] = 1,
- [TH_FIN|TH_ACK|TH_URG] = 1,
- [TH_ACK] = 1,
- [TH_ACK|TH_URG] = 1,
+ [TCPHDR_SYN] = 1,
+ [TCPHDR_SYN|TCPHDR_URG] = 1,
+ [TCPHDR_SYN|TCPHDR_ACK] = 1,
+ [TCPHDR_RST] = 1,
+ [TCPHDR_RST|TCPHDR_ACK] = 1,
+ [TCPHDR_FIN|TCPHDR_ACK] = 1,
+ [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1,
+ [TCPHDR_ACK] = 1,
+ [TCPHDR_ACK|TCPHDR_URG] = 1,
};
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
@@ -803,7 +795,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
}
/* Check TCP flags. */
- tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR|TH_PUSH));
+ tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
if (!tcp_valid_flags[tcpflags]) {
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
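Annotation: the conntrack hunk above replaces the file-local TH_* macros with the shared TCPHDR_* flag definitions and the tcp_flag_byte() accessor, which reads the flags byte (offset 13) of the TCP header, exactly as the removed open-coded expression did. A small sketch of the resulting check, assuming those helpers come from net/tcp.h as the hunk implies:

#include <linux/tcp.h>
#include <net/tcp.h>

/* rough equivalent of the flag check in tcp_error(): mask out the
 * always-valid PSH/ECE/CWR bits, then look the rest up in the table */
static bool tcp_flags_valid_sketch(const struct tcphdr *th,
                                   const u8 *valid_table)
{
        u8 flags = tcp_flag_byte(th) & ~(TCPHDR_ECE | TCPHDR_CWR | TCPHDR_PSH);

        return valid_table[flags];
}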
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fb86a51..6a1572b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -413,8 +413,9 @@ __build_packet_message(struct nfulnl_instance *inst,
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex));
/* this is the bridge group "brX" */
+ /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
- htonl(indev->br_port->br->dev->ifindex));
+ htonl(br_port_get_rcu(indev)->br->dev->ifindex));
} else {
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
@@ -440,8 +441,9 @@ __build_packet_message(struct nfulnl_instance *inst,
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex));
/* this is the bridge group "brX" */
+ /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
- htonl(outdev->br_port->br->dev->ifindex));
+ htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
} else {
/* Case 2: indev is a bridge group, we need to look
* for physical device (when called from ipv4) */
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index d05605b..68e67d1 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -291,8 +291,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex));
/* this is the bridge group "brX" */
+ /* rcu_read_lock()ed by __nf_queue */
NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
- htonl(indev->br_port->br->dev->ifindex));
+ htonl(br_port_get_rcu(indev)->br->dev->ifindex));
} else {
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
@@ -316,8 +317,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex));
/* this is the bridge group "brX" */
+ /* rcu_read_lock()ed by __nf_queue */
NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
- htonl(outdev->br_port->br->dev->ifindex));
+ htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
} else {
/* Case 2: outdev is bridge group, we need to look for
* physical output device (when called from ipv4) */
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 1841388..eb81c38 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -220,15 +220,13 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
}
#endif
-#define TH_SYN 0x02
-
/* Must specify -p tcp --syn */
static inline bool find_syn_match(const struct xt_entry_match *m)
{
const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
- tcpinfo->flg_cmp & TH_SYN &&
+ tcpinfo->flg_cmp & TCPHDR_SYN &&
!(tcpinfo->invflags & XT_TCP_INV_FLAGS))
return true;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a2eb965..7aeaa83 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1323,19 +1323,23 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (msg->msg_flags&MSG_OOB)
return -EOPNOTSUPP;
- if (NULL == siocb->scm)
+ if (NULL == siocb->scm) {
siocb->scm = &scm;
+ memset(&scm, 0, sizeof(scm));
+ }
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
if (msg->msg_namelen) {
+ err = -EINVAL;
if (addr->nl_family != AF_NETLINK)
- return -EINVAL;
+ goto out;
dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
+ err = -EPERM;
if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
- return -EPERM;
+ goto out;
} else {
dst_pid = nlk->dst_pid;
dst_group = nlk->dst_group;
@@ -1387,6 +1391,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
out:
+ scm_destroy(siocb->scm);
return err;
}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c0b6863..a16b017 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -160,6 +160,8 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
spin_lock(&m->tcf_lock);
m->tcf_tm.lastuse = jiffies;
+ m->tcf_bstats.bytes += qdisc_pkt_len(skb);
+ m->tcf_bstats.packets++;
dev = m->tcfm_dev;
if (!(dev->flags & IFF_UP)) {
@@ -169,13 +171,11 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
goto out;
}
- skb2 = skb_act_clone(skb, GFP_ATOMIC);
+ at = G_TC_AT(skb->tc_verd);
+ skb2 = skb_act_clone(skb, GFP_ATOMIC, m->tcf_action);
if (skb2 == NULL)
goto out;
- m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
- m->tcf_bstats.packets++;
- at = G_TC_AT(skb->tc_verd);
if (!(at & AT_EGRESS)) {
if (m->tcfm_ok_push)
skb_push(skb2, skb2->dev->hard_header_len);
@@ -185,16 +185,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
- skb2->dev = dev;
skb2->skb_iif = skb->dev->ifindex;
+ skb2->dev = dev;
dev_queue_xmit(skb2);
err = 0;
out:
if (err) {
m->tcf_qstats.overlimits++;
- m->tcf_bstats.bytes += qdisc_pkt_len(skb);
- m->tcf_bstats.packets++;
/* should we be asking for packet to be dropped?
* may make sense for redirect case only
*/
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 5709494..0be49a4 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -265,40 +265,29 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_nat *p = a->priv;
- struct tc_nat *opt;
+ struct tc_nat opt;
struct tcf_t t;
- int s;
- s = sizeof(*opt);
+ opt.old_addr = p->old_addr;
+ opt.new_addr = p->new_addr;
+ opt.mask = p->mask;
+ opt.flags = p->flags;
- /* netlink spinlocks held above us - must use ATOMIC */
- opt = kzalloc(s, GFP_ATOMIC);
- if (unlikely(!opt))
- return -ENOBUFS;
+ opt.index = p->tcf_index;
+ opt.action = p->tcf_action;
+ opt.refcnt = p->tcf_refcnt - ref;
+ opt.bindcnt = p->tcf_bindcnt - bind;
- opt->old_addr = p->old_addr;
- opt->new_addr = p->new_addr;
- opt->mask = p->mask;
- opt->flags = p->flags;
-
- opt->index = p->tcf_index;
- opt->action = p->tcf_action;
- opt->refcnt = p->tcf_refcnt - ref;
- opt->bindcnt = p->tcf_bindcnt - bind;
-
- NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
+ NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
- kfree(opt);
-
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
- kfree(opt);
return -1;
}
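Annotation: the act_nat dump hunk swaps a GFP_ATOMIC allocation for a struct filled on the stack; netlink copies the attribute into the message anyway, so the allocation, its failure path, and both kfree() calls disappear. A minimal sketch of the same pattern with nla_put(); only a subset of the fields copied above is shown, and the helper name is generic, not the literal act_nat.c code:

static int dump_nat_parms_sketch(struct sk_buff *skb, const struct tcf_nat *p)
{
        struct tc_nat opt = {
                .old_addr = p->old_addr,
                .new_addr = p->new_addr,
                .mask     = p->mask,
                .flags    = p->flags,
        };

        /* nla_put() copies sizeof(opt) bytes into the message */
        return nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
}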
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3415b6c..807643b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -449,6 +449,7 @@ static __init void teql_master_setup(struct net_device *dev)
dev->tx_queue_len = 100;
dev->flags = IFF_NOARP;
dev->hard_header_len = LL_MAX_HEADER;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
static LIST_HEAD(master_dev_list);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a0e1a7f..c0e162a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1002,7 +1002,8 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
static inline int init_sctp_mibs(void)
{
return snmp_mib_init((void __percpu **)sctp_statistics,
- sizeof(struct sctp_mib));
+ sizeof(struct sctp_mib),
+ __alignof__(struct sctp_mib));
}
static inline void cleanup_sctp_mibs(void)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index fef2cc5..75ba48b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -282,7 +282,7 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
return s;
}
-static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
+static struct sock *unix_find_socket_byinode(struct inode *i)
{
struct sock *s;
struct hlist_node *node;
@@ -292,9 +292,6 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->dentry;
- if (!net_eq(sock_net(s), net))
- continue;
-
if (dentry && dentry->d_inode == i) {
sock_hold(s);
goto found;
@@ -450,11 +447,31 @@ static int unix_release_sock(struct sock *sk, int embrion)
return 0;
}
+static void init_peercred(struct sock *sk)
+{
+ put_pid(sk->sk_peer_pid);
+ if (sk->sk_peer_cred)
+ put_cred(sk->sk_peer_cred);
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ sk->sk_peer_cred = get_current_cred();
+}
+
+static void copy_peercred(struct sock *sk, struct sock *peersk)
+{
+ put_pid(sk->sk_peer_pid);
+ if (sk->sk_peer_cred)
+ put_cred(sk->sk_peer_cred);
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+}
+
static int unix_listen(struct socket *sock, int backlog)
{
int err;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
+ struct pid *old_pid = NULL;
+ const struct cred *old_cred = NULL;
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -470,12 +487,14 @@ static int unix_listen(struct socket *sock, int backlog)
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
/* set credentials so connect can copy them */
- sk->sk_peercred.pid = task_tgid_vnr(current);
- current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
+ init_peercred(sk);
err = 0;
out_unlock:
unix_state_unlock(sk);
+ put_pid(old_pid);
+ if (old_cred)
+ put_cred(old_cred);
out:
return err;
}
@@ -736,7 +755,7 @@ static struct sock *unix_find_other(struct net *net,
err = -ECONNREFUSED;
if (!S_ISSOCK(inode->i_mode))
goto put_fail;
- u = unix_find_socket_byinode(net, inode);
+ u = unix_find_socket_byinode(inode);
if (!u)
goto put_fail;
@@ -1140,8 +1159,7 @@ restart:
unix_peer(newsk) = sk;
newsk->sk_state = TCP_ESTABLISHED;
newsk->sk_type = sk->sk_type;
- newsk->sk_peercred.pid = task_tgid_vnr(current);
- current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
+ init_peercred(newsk);
newu = unix_sk(newsk);
newsk->sk_wq = &newu->peer_wq;
otheru = unix_sk(other);
@@ -1157,7 +1175,7 @@ restart:
}
/* Set credentials */
- sk->sk_peercred = other->sk_peercred;
+ copy_peercred(sk, other);
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
@@ -1199,10 +1217,8 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
sock_hold(skb);
unix_peer(ska) = skb;
unix_peer(skb) = ska;
- ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
- current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid);
- ska->sk_peercred.uid = skb->sk_peercred.uid;
- ska->sk_peercred.gid = skb->sk_peercred.gid;
+ init_peercred(ska);
+ init_peercred(skb);
if (ska->sk_type != SOCK_DGRAM) {
ska->sk_state = TCP_ESTABLISHED;
@@ -1297,18 +1313,20 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
int i;
scm->fp = UNIXCB(skb).fp;
- skb->destructor = sock_wfree;
UNIXCB(skb).fp = NULL;
for (i = scm->fp->count-1; i >= 0; i--)
unix_notinflight(scm->fp->fp[i]);
}
-static void unix_destruct_fds(struct sk_buff *skb)
+static void unix_destruct_scm(struct sk_buff *skb)
{
struct scm_cookie scm;
memset(&scm, 0, sizeof(scm));
- unix_detach_fds(&scm, skb);
+ scm.pid = UNIXCB(skb).pid;
+ scm.cred = UNIXCB(skb).cred;
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(&scm, skb);
/* Alas, it calls VFS */
/* So fscking what? fput() had been SMP-safe since the last Summer */
@@ -1331,10 +1349,22 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
for (i = scm->fp->count-1; i >= 0; i--)
unix_inflight(scm->fp->fp[i]);
- skb->destructor = unix_destruct_fds;
return 0;
}
+static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+{
+ int err = 0;
+ UNIXCB(skb).pid = get_pid(scm->pid);
+ UNIXCB(skb).cred = get_cred(scm->cred);
+ UNIXCB(skb).fp = NULL;
+ if (scm->fp && send_fds)
+ err = unix_attach_fds(scm, skb);
+
+ skb->destructor = unix_destruct_scm;
+ return err;
+}
+
/*
* Send AF_UNIX data.
*/
@@ -1391,12 +1421,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (skb == NULL)
goto out;
- memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp) {
- err = unix_attach_fds(siocb->scm, skb);
- if (err)
- goto out_free;
- }
+ err = unix_scm_to_skb(siocb->scm, skb, true);
+ if (err)
+ goto out_free;
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
@@ -1566,16 +1593,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
*/
size = min_t(int, size, skb_tailroom(skb));
- memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
+
/* Only send the fds in the first buffer */
- if (siocb->scm->fp && !fds_sent) {
- err = unix_attach_fds(siocb->scm, skb);
- if (err) {
- kfree_skb(skb);
- goto out_err;
- }
- fds_sent = true;
+ err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
+ if (err) {
+ kfree_skb(skb);
+ goto out_err;
}
+ fds_sent = true;
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err) {
@@ -1692,7 +1717,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
siocb->scm = &tmp_scm;
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
- siocb->scm->creds = *UNIXCREDS(skb);
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
unix_set_secdata(siocb->scm, skb);
if (!(flags & MSG_PEEK)) {
@@ -1841,14 +1866,14 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (check_creds) {
/* Never glue messages from different writers */
- if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
- sizeof(siocb->scm->creds)) != 0) {
+ if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+ (UNIXCB(skb).cred != siocb->scm->cred)) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} else {
/* Copy credentials */
- siocb->scm->creds = *UNIXCREDS(skb);
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
check_creds = 1;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 37d0e0a..47fcfd0 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -894,7 +894,7 @@ out_fail_pernet:
}
subsys_initcall(cfg80211_init);
-static void cfg80211_exit(void)
+static void __exit cfg80211_exit(void)
{
debugfs_remove(ieee80211_debugfs_dir);
nl80211_exit();
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f69ae19..9f95354 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -846,8 +846,9 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
if (!wdev->current_bss ||
memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
ETH_ALEN) != 0 ||
- memcmp(wdev->current_bss->pub.bssid, mgmt->da,
- ETH_ALEN) != 0)
+ (wdev->iftype == NL80211_IFTYPE_STATION &&
+ memcmp(wdev->current_bss->pub.bssid, mgmt->da,
+ ETH_ALEN) != 0))
return -ENOTCONN;
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 90ab3c8..85285b4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -153,6 +153,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
[NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
[NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
+
+ [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 },
+ [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 },
};
/* policy for the attributes */
@@ -869,6 +872,34 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
goto bad_res;
}
+ if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) {
+ enum nl80211_tx_power_setting type;
+ int idx, mbm = 0;
+
+ if (!rdev->ops->set_tx_power) {
+ result = -EOPNOTSUPP;
+ goto bad_res;
+ }
+
+ idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING;
+ type = nla_get_u32(info->attrs[idx]);
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] &&
+ (type != NL80211_TX_POWER_AUTOMATIC)) {
+ result = -EINVAL;
+ goto bad_res;
+ }
+
+ if (type != NL80211_TX_POWER_AUTOMATIC) {
+ idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL;
+ mbm = nla_get_u32(info->attrs[idx]);
+ }
+
+ result = rdev->ops->set_tx_power(&rdev->wiphy, type, mbm);
+ if (result)
+ goto bad_res;
+ }
+
changed = 0;
if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
@@ -1107,7 +1138,7 @@ static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
enum nl80211_iftype iftype)
{
if (!use_4addr) {
- if (netdev && netdev->br_port)
+ if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
return -EBUSY;
return 0;
}
@@ -3955,6 +3986,55 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
}
}
+ if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
+ u8 *rates =
+ nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+ int n_rates =
+ nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+ struct ieee80211_supported_band *sband =
+ wiphy->bands[ibss.channel->band];
+ int i, j;
+
+ if (n_rates == 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < n_rates; i++) {
+ int rate = (rates[i] & 0x7f) * 5;
+ bool found = false;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ if (sband->bitrates[j].bitrate == rate) {
+ found = true;
+ ibss.basic_rates |= BIT(j);
+ break;
+ }
+ }
+ if (!found) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ } else {
+ /*
+ * If no rates were explicitly configured,
+ * use the mandatory rate set for 11b or
+ * 11a for maximum compatibility.
+ */
+ struct ieee80211_supported_band *sband =
+ wiphy->bands[ibss.channel->band];
+ int j;
+ u32 flag = ibss.channel->band == IEEE80211_BAND_5GHZ ?
+ IEEE80211_RATE_MANDATORY_A :
+ IEEE80211_RATE_MANDATORY_B;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ if (sband->bitrates[j].flags & flag)
+ ibss.basic_rates |= BIT(j);
+ }
+ }
+
err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
out:
@@ -4653,7 +4733,8 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
if (err)
goto unlock_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4703,7 +4784,8 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
err = -EOPNOTSUPP;
goto out;
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 8f0d97d..1ac2bdd 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -80,7 +80,7 @@ static const struct ieee80211_regdomain *country_ie_regdomain;
* - country_ie_regdomain
* - last_request
*/
-DEFINE_MUTEX(reg_mutex);
+static DEFINE_MUTEX(reg_mutex);
#define assert_reg_lock() WARN_ON(!mutex_is_locked(&reg_mutex))
/* Used to queue up regulatory hints */
@@ -2630,7 +2630,7 @@ out:
mutex_unlock(&reg_mutex);
}
-int regulatory_init(void)
+int __init regulatory_init(void)
{
int err = 0;
@@ -2676,7 +2676,7 @@ int regulatory_init(void)
return 0;
}
-void regulatory_exit(void)
+void /* __init_or_exit */ regulatory_exit(void)
{
struct regulatory_request *reg_request, *tmp;
struct reg_beacon *reg_beacon, *btmp;
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index b26224a..c4695d0 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -10,7 +10,7 @@ int regulatory_hint_user(const char *alpha2);
void reg_device_remove(struct wiphy *wiphy);
-int regulatory_init(void);
+int __init regulatory_init(void);
void regulatory_exit(void);
int set_regdom(const struct ieee80211_regdomain *rd);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 3416373..0c8a1e8 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -770,8 +770,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
return -EOPNOTSUPP;
/* if it's part of a bridge, reject changing type to station/ibss */
- if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC ||
- ntype == NL80211_IFTYPE_STATION))
+ if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
+ (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION))
return -EBUSY;
if (ntype != otype) {
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 9634299..1ff1e9f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -829,7 +829,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
- enum tx_power_setting type;
+ enum nl80211_tx_power_setting type;
int dbm = 0;
if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
@@ -852,7 +852,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
if (data->txpower.value < 0)
return -EINVAL;
dbm = data->txpower.value;
- type = TX_POWER_FIXED;
+ type = NL80211_TX_POWER_FIXED;
/* TODO: do regulatory check! */
} else {
/*
@@ -860,10 +860,10 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
* passed in from userland.
*/
if (data->txpower.value < 0) {
- type = TX_POWER_AUTOMATIC;
+ type = NL80211_TX_POWER_AUTOMATIC;
} else {
dbm = data->txpower.value;
- type = TX_POWER_LIMITED;
+ type = NL80211_TX_POWER_LIMITED;
}
}
} else {
@@ -872,7 +872,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
return 0;
}
- return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);
+ return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4bf27d9..593c06b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2480,7 +2480,8 @@ static int __net_init xfrm_statistics_init(struct net *net)
int rv;
if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
- sizeof(struct linux_xfrm_mib)) < 0)
+ sizeof(struct linux_xfrm_mib),
+ __alignof__(struct linux_xfrm_mib)) < 0)
return -ENOMEM;
rv = xfrm_proc_init(net);
if (rv < 0)